aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitattributes19
-rw-r--r--.gitignore6
-rw-r--r--AUTHORS7
-rw-r--r--CONTRIBUTING.md13
-rw-r--r--INSTALL229
-rw-r--r--INSTALL-CI-DEV131
-rw-r--r--INSTALL-DEV17
-rw-r--r--INSTALL-PROXY136
-rw-r--r--LEGAL6
-rw-r--r--LICENSE35
-rw-r--r--NEWS53
-rw-r--r--brep/handler/buildfile4
-rw-r--r--brep/handler/ci/buildfile1
-rw-r--r--brep/handler/ci/ci-dir.in1
-rw-r--r--brep/handler/ci/ci-load.in96
-rw-r--r--brep/handler/ci/ci.bash.in1
-rw-r--r--brep/handler/handler.bash.in14
-rw-r--r--brep/handler/submit/.gitignore1
-rw-r--r--brep/handler/submit/buildfile5
-rw-r--r--brep/handler/submit/submit-dir.in10
-rw-r--r--brep/handler/submit/submit-git.bash.in102
-rw-r--r--brep/handler/submit/submit-git.in50
-rw-r--r--brep/handler/submit/submit-pub.in435
-rw-r--r--brep/handler/submit/submit.bash.in57
-rw-r--r--brep/handler/upload/.gitignore2
-rw-r--r--brep/handler/upload/buildfile13
-rw-r--r--brep/handler/upload/upload-bindist-clean.in224
-rw-r--r--brep/handler/upload/upload-bindist.in595
-rw-r--r--brep/handler/upload/upload.bash.in40
-rw-r--r--build/bootstrap.build1
-rw-r--r--build/export.build1
-rw-r--r--build/root.build43
-rw-r--r--buildfile5
-rw-r--r--clean/buildfile8
-rw-r--r--clean/clean.cli5
-rw-r--r--clean/clean.cxx83
-rw-r--r--doc/buildfile10
-rwxr-xr-xdoc/cli.sh38
-rw-r--r--doc/manual.cli236
m---------doc/style0
-rw-r--r--etc/brep-module.conf192
-rw-r--r--etc/buildfile22
-rw-r--r--etc/private/README335
-rw-r--r--etc/private/install/README75
-rw-r--r--etc/private/install/brep-apache2.conf99
-rwxr-xr-xetc/private/install/brep-install479
-rw-r--r--etc/private/install/brep-load.service11
-rw-r--r--etc/private/install/brep-load.timer33
-rw-r--r--etc/private/install/brep-logrotate20
-rw-r--r--etc/private/install/brep-module.conf532
-rwxr-xr-xetc/private/install/brep-startup88
-rw-r--r--etc/private/install/brep-startup.service17
-rwxr-xr-xetc/private/install/vm-gen-service207
-rw-r--r--etc/private/systemd-networkd/10-br0.netdev8
-rw-r--r--etc/private/systemd-networkd/10-tap0.netdev12
-rw-r--r--etc/private/systemd-networkd/20-br0-eth0.network12
-rw-r--r--etc/private/systemd-networkd/20-br0-tap0.network16
-rw-r--r--etc/private/systemd-networkd/30-br0-dhcp.network17
-rw-r--r--etc/private/systemd-networkd/README106
-rwxr-xr-xetc/private/vm-gen-macaddress60
-rwxr-xr-xetc/private/vm-login33
-rwxr-xr-xetc/private/vm-start98
-rwxr-xr-xetc/private/vm-start-base206
-rwxr-xr-xetc/private/vm-stop37
-rw-r--r--etc/proxy-apache2.conf144
-rw-r--r--etc/systemd/brep-clean.service5
-rw-r--r--etc/systemd/brep-clean.timer4
-rw-r--r--etc/systemd/brep-monitor.service14
-rw-r--r--etc/systemd/brep-monitor.timer23
-rw-r--r--libbrep/build-extra.sql249
-rw-r--r--libbrep/build-package.hxx270
-rw-r--r--libbrep/build.cxx192
-rw-r--r--libbrep/build.hxx355
-rw-r--r--libbrep/build.xml159
-rw-r--r--libbrep/buildfile1
-rw-r--r--libbrep/common-traits.hxx39
-rw-r--r--libbrep/common.cxx27
-rw-r--r--libbrep/common.hxx457
-rw-r--r--libbrep/database-lock.cxx1
-rw-r--r--libbrep/database-lock.hxx1
-rwxr-xr-xlibbrep/odb.sh4
-rw-r--r--libbrep/package-extra.sql39
-rw-r--r--libbrep/package-traits.cxx1
-rw-r--r--libbrep/package-traits.hxx1
-rw-r--r--libbrep/package.cxx125
-rw-r--r--libbrep/package.hxx508
-rw-r--r--libbrep/package.xml578
-rw-r--r--libbrep/types.hxx25
-rw-r--r--libbrep/utility.hxx17
-rw-r--r--libbrep/version.hxx.in1
-rw-r--r--libbrep/wrapper-traits.hxx3
-rw-r--r--load/buildfile9
-rw-r--r--load/load.cli67
-rw-r--r--load/load.cxx986
-rw-r--r--load/types-parsers.cxx1
-rw-r--r--load/types-parsers.hxx1
-rw-r--r--manifest51
-rw-r--r--migrate/buildfile5
-rw-r--r--migrate/migrate.cli5
-rw-r--r--migrate/migrate.cxx39
-rw-r--r--mod/.gitignore2
-rw-r--r--mod/build-config-module.cxx288
-rw-r--r--mod/build-config-module.hxx85
-rw-r--r--mod/build-result-module.cxx349
-rw-r--r--mod/build-result-module.hxx78
-rw-r--r--mod/build-target-config.cxx254
-rw-r--r--mod/build-target-config.hxx96
-rw-r--r--mod/build.cxx180
-rw-r--r--mod/build.hxx20
-rw-r--r--mod/buildfile37
-rw-r--r--mod/ci-common.cxx494
-rw-r--r--mod/ci-common.hxx96
-rw-r--r--mod/database-module.cxx59
-rw-r--r--mod/database-module.hxx26
-rw-r--r--mod/database.cxx9
-rw-r--r--mod/database.hxx1
-rw-r--r--mod/diagnostics.cxx1
-rw-r--r--mod/diagnostics.hxx3
-rw-r--r--mod/external-handler.cxx29
-rw-r--r--mod/external-handler.hxx3
-rw-r--r--mod/mod-build-configs.cxx74
-rw-r--r--mod/mod-build-configs.hxx3
-rw-r--r--mod/mod-build-force.cxx167
-rw-r--r--mod/mod-build-force.hxx11
-rw-r--r--mod/mod-build-log.cxx85
-rw-r--r--mod/mod-build-log.hxx3
-rw-r--r--mod/mod-build-result.cxx714
-rw-r--r--mod/mod-build-result.hxx16
-rw-r--r--mod/mod-build-task.cxx2338
-rw-r--r--mod/mod-build-task.hxx11
-rw-r--r--mod/mod-builds.cxx823
-rw-r--r--mod/mod-builds.hxx3
-rw-r--r--mod/mod-ci.cxx618
-rw-r--r--mod/mod-ci.hxx62
-rw-r--r--mod/mod-package-details.cxx39
-rw-r--r--mod/mod-package-details.hxx3
-rw-r--r--mod/mod-package-version-details.cxx570
-rw-r--r--mod/mod-package-version-details.hxx3
-rw-r--r--mod/mod-packages.cxx33
-rw-r--r--mod/mod-packages.hxx3
-rw-r--r--mod/mod-repository-details.cxx16
-rw-r--r--mod/mod-repository-details.hxx3
-rw-r--r--mod/mod-repository-root.cxx58
-rw-r--r--mod/mod-repository-root.hxx8
-rw-r--r--mod/mod-submit.cxx53
-rw-r--r--mod/mod-submit.hxx5
-rw-r--r--mod/mod-upload.cxx763
-rw-r--r--mod/mod-upload.hxx41
-rw-r--r--mod/module.cli (renamed from mod/options.cli)444
-rw-r--r--mod/module.cxx70
-rw-r--r--mod/module.hxx10
-rw-r--r--mod/options-types.hxx14
-rw-r--r--mod/page.cxx314
-rw-r--r--mod/page.hxx80
-rw-r--r--mod/services.cxx3
-rw-r--r--mod/tenant-service.hxx155
-rw-r--r--mod/types-parsers.cxx145
-rw-r--r--mod/types-parsers.hxx43
-rw-r--r--mod/utility.hxx1
-rw-r--r--monitor/.gitignore2
-rw-r--r--monitor/buildfile45
-rw-r--r--monitor/module.cli16
-rw-r--r--monitor/monitor.cli208
-rw-r--r--monitor/monitor.cxx1174
-rw-r--r--repositories.manifest4
-rw-r--r--tests/ci/buildfile1
-rw-r--r--tests/ci/ci-dir.testscript1
-rw-r--r--tests/ci/ci-load.testscript39
-rw-r--r--tests/ci/data.testscript17
-rw-r--r--tests/load/1/math/libexp-+2-1.2+1.tar.gzbin426 -> 498 bytes
-rw-r--r--tests/load/1/math/libfoo-+0-X.Y.tar.gzbin222 -> 301 bytes
-rw-r--r--tests/load/1/math/libfoo-1.0.tar.gzbin327 -> 410 bytes
-rw-r--r--tests/load/1/math/libfoo-1.2.4+1.tar.gzbin1025 -> 1515 bytes
-rw-r--r--tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gzbin262 -> 340 bytes
-rw-r--r--tests/load/1/math/libfoo-examples-1.2.4.tar.gzbin268 -> 335 bytes
-rw-r--r--tests/load/1/math/libfoo-tests-1.2.4.tar.gzbin259 -> 392 bytes
-rw-r--r--tests/load/1/math/libpq-0.tar.gzbin807 -> 881 bytes
-rw-r--r--tests/load/1/math/libstudxml-1.0.0+1.tar.gzbin456 -> 535 bytes
-rw-r--r--tests/load/1/math/packages.manifest126
-rw-r--r--tests/load/1/math/repositories.manifest2
-rw-r--r--tests/load/1/misc/packages.manifest5
-rw-r--r--tests/load/1/stable/libfoo-1.0.tar.gzbin327 -> 410 bytes
-rw-r--r--tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gzbin353 -> 433 bytes
-rw-r--r--tests/load/1/stable/libfoo-1.2.2.tar.gzbin301 -> 378 bytes
-rw-r--r--tests/load/1/stable/libfoo-1.2.3+4.tar.gzbin301 -> 379 bytes
-rw-r--r--tests/load/1/stable/libfoo-1.2.4.tar.gzbin351 -> 433 bytes
-rw-r--r--tests/load/1/stable/packages.manifest33
-rw-r--r--tests/load/1/stable/repositories.manifest59
-rw-r--r--tests/load/1/stable/signature.manifest22
-rw-r--r--tests/load/1/testing/packages.manifest2
-rw-r--r--tests/load/buildfile1
-rwxr-xr-xtests/load/cert13
-rw-r--r--tests/load/cert.pem57
-rw-r--r--tests/load/driver.cxx178
-rw-r--r--tests/load/loadtab7
-rw-r--r--tests/load/pkg/1/dev.cppget.org/signed/packages.manifest2
-rw-r--r--tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest59
-rw-r--r--tests/load/pkg/1/dev.cppget.org/signed/signature.manifest22
-rw-r--r--tests/submit/buildfile10
-rw-r--r--tests/submit/data.testscript22
-rw-r--r--tests/submit/submit-dir.testscript12
-rw-r--r--tests/submit/submit-git.testscript133
-rw-r--r--tests/submit/submit-pub.testscript213
-rw-r--r--tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tarbin0 -> 10240 bytes
-rw-r--r--tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest22
-rw-r--r--tests/upload/buildfile13
-rw-r--r--tests/upload/data.testscript34
-rw-r--r--tests/upload/upload-bindist.testscript126
-rw-r--r--tests/web/xhtml/buildfile5
-rw-r--r--tests/web/xhtml/driver.cxx6
-rw-r--r--web/server/apache/log.hxx (renamed from web/apache/log.hxx)11
-rw-r--r--web/server/apache/request.cxx (renamed from web/apache/request.cxx)15
-rw-r--r--web/server/apache/request.hxx (renamed from web/apache/request.hxx)15
-rw-r--r--web/server/apache/request.ixx (renamed from web/apache/request.ixx)3
-rw-r--r--web/server/apache/service.cxx (renamed from web/apache/service.cxx)15
-rw-r--r--web/server/apache/service.hxx (renamed from web/apache/service.hxx)17
-rw-r--r--web/server/apache/service.txx (renamed from web/apache/service.txx)5
-rw-r--r--web/server/apache/stream.hxx (renamed from web/apache/stream.hxx)11
-rw-r--r--web/server/buildfile (renamed from web/buildfile)8
-rw-r--r--web/server/mime-url-encoding.cxx (renamed from web/mime-url-encoding.cxx)7
-rw-r--r--web/server/mime-url-encoding.hxx (renamed from web/mime-url-encoding.hxx)9
-rw-r--r--web/server/module.hxx (renamed from web/module.hxx)16
-rw-r--r--web/version.hxx.in12
-rw-r--r--web/xhtml/.gitignore (renamed from web/.gitignore)0
-rw-r--r--web/xhtml/buildfile10
-rw-r--r--web/xhtml/fragment.cxx (renamed from web/xhtml-fragment.cxx)7
-rw-r--r--web/xhtml/fragment.hxx (renamed from web/xhtml-fragment.hxx)3
-rw-r--r--web/xhtml/serialization.hxx (renamed from web/xhtml.hxx)11
-rw-r--r--web/xhtml/version.hxx.in11
-rw-r--r--www/buildfile1
-rw-r--r--www/builds-body.css7
-rw-r--r--www/ci.xhtml4
-rw-r--r--www/package-details-body.css1
-rw-r--r--www/package-version-details-body.css115
-rw-r--r--www/packages-body.css1
235 files changed, 18986 insertions, 3790 deletions
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..9756e4d
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,19 @@
+# This is a good default: files that are auto-detected by git to be text are
+# converted to the platform-native line ending (LF on Unix, CRLF on Windows)
+# in the working tree and to LF in the repository.
+#
+* text=auto
+
+# Use `eol=crlf` for files that should have the CRLF line ending both in the
+# working tree (even on Unix) and in the repository.
+#
+#*.bat text eol=crlf
+
+# Use `eol=lf` for files that should have the LF line ending both in the
+# working tree (even on Windows) and in the repository.
+#
+#*.sh text eol=lf
+
+# Use `binary` to make sure certain files are never auto-detected as text.
+#
+*.gz binary
diff --git a/.gitignore b/.gitignore
index c3de2e7..5046596 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..9780708
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,7 @@
+This file contains information about the build2 authors for copyright
+purposes.
+
+The copyright for the code is held by the contributors of the code. The
+revision history in the version control system is the primary source of
+authorship information for copyright purposes. Contributors that have
+requested to also be noted explicitly in this file are listed below:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a3216bb..6bfc34f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,3 +1,16 @@
This project is part of the `build2` toolchain; see its
[Community](https://build2.org/community.xhtml) page for various ways to
contribute.
+
+The copyright for the code is held by the contributors of the code (see the
+`AUTHORS` file). The code is licensed under permissive open source licensing
+terms (see the `LICENSE` file). When you contribute code to this project, you
+license it under these terms. Before contributing please make sure that these
+terms are acceptable to you (and to your employer(s), if they have rights to
+intellectual property that you create) and that the code being contributed is
+your original creation.
+
+The revision history in the version control system is the primary source of
+authorship information for copyright purposes. If, however, you would like
+to also be noted explicitly, please include the appropriate change to the
+`AUTHORS` file along with your contribution.
diff --git a/INSTALL b/INSTALL
index 59ec07e..79d698f 100644
--- a/INSTALL
+++ b/INSTALL
@@ -11,8 +11,9 @@ can be omitted.
1. Create 'brep' User
This user will be used to run the brep package database loader, build database
-cleaner, and the database schemes migration utility. We will also use its home
-directory to build and install the brep module, store its configuration, etc.
+cleaner, monitor, and database schemas migration utility. We will also use its
+home directory to build and install the brep module, store its configuration,
+etc.
Note: if the deployment machine employs SELinux, then this approach may
require additional configuration steps (not shown) in order to allow Apache2
@@ -43,7 +44,7 @@ b) Install PostgreSQL 9.3 or above (including the contrib package containing
the postgres_fdw extension) as well as Apache2 using your distribution's
packages. Below are the names of these packages for some distributions:
- Debian/Ubuntu: postgresql-server postgresql-contrib apache2
+ Debian/Ubuntu: postgresql postgresql-contrib apache2
Fedora/RHEL: postgresql-server postgresql-contrib httpd
FreeBSD: postgresqlXY-server postgresqlXY-contrib apache24
@@ -65,7 +66,7 @@ c) Install PostgreSQL and Apache2 development files. Specifically, we need
files. Below are the names of their packages for some distributions:
Debian/Ubuntu: libpq-dev libapr1-dev libapreq2-dev apache2-dev
- Fedora/RHEL: posqtgresql-devel apr-devel libapreq2-devel httpd-devel
+ Fedora/RHEL: postgresql-devel apr-devel libapreq2-devel httpd-devel
FreeBSD: postgresqlXY-client apr libapreq2 apache24
d) Unless you already have the build2 toolchain, install it by following
@@ -76,24 +77,13 @@ d) Unless you already have the build2 toolchain, install it by following
3. Build and Install brep
-Normally the only extra information that you need to provide on this step is
-the location of the Apache2 headers (httpd.h, etc). Below are their locations
-for some distributions:
-
-Debian/Ubuntu: /usr/include/apache2
-Fedora/RHEL: /usr/include/httpd
-FreeBSD: /usr/local/include/apache24
-
-You can also use the Apache2 apxs utility to obtain this information as shown
-below.
-
$ mkdir brep
$ cd brep
-$ bpkg create \
- cc \
- config.cc.poptions="-I$(apxs -q includedir)" \
- config.bin.rpath=$HOME/install/lib \
+$ bpkg create \
+ cc \
+ config.cc.coptions=-O3 \
+ config.bin.rpath=$HOME/install/lib \
config.install.root=$HOME/install
$ bpkg add https://pkg.cppget.org/1/alpha
@@ -103,6 +93,22 @@ $ bpkg install brep
$ cd .. # Back to brep home.
+Note that by default the location of the Apache2 headers (httpd.h, etc) is
+detected automatically, using the Apache2 apxs utility. Below are their
+locations for some distributions:
+
+Debian/Ubuntu: /usr/include/apache2
+Fedora/RHEL: /usr/include/httpd
+FreeBSD: /usr/local/include/apache24
+
+To disable this functionality and specify the location explicitly, you can
+configure brep in the development mode and specify the respective preprocessor
+option by, for example, adding the following configuration variables to the
+above bpkg-build command:
+
+config.brep.develop=true
+config.cc.poptions="-I..."
+
4. Create PostgreSQL User and Databases
@@ -157,7 +163,7 @@ CREATE EXTENSION postgres_fdw;
CREATE SERVER package_server
FOREIGN DATA WRAPPER postgres_fdw
- OPTIONS (dbname 'brep_package', updatable 'false');
+ OPTIONS (dbname 'brep_package', updatable 'true');
GRANT USAGE ON FOREIGN SERVER package_server to brep;
@@ -165,6 +171,17 @@ CREATE USER MAPPING FOR PUBLIC
SERVER package_server
OPTIONS (user 'brep-build', password '-');
+Note that starting with PostgreSQL 15 only the database owner can create the
+objects in the public schema by default. Thus, if the PostgreSQL version is 15
+or above, then all the privileges on this schema in the created databases need
+to be granted explicitly by the postgres user to the brep user:
+
+\c brep_package
+GRANT ALL PRIVILEGES ON SCHEMA public TO brep;
+
+\c brep_build
+GRANT ALL PRIVILEGES ON SCHEMA public TO brep;
+
Exit psql (^D)
The user brep-build is required (by the postgres_fdw extension) to login with
@@ -194,7 +211,7 @@ CREATE EXTENSION citext;
Exit psql (^D)
-5. Create Database Schemes and Load Repositories
+5. Create Database Schemas and Load Repositories
$ mkdir config
$ edit config/loadtab # Loader configuration, see brep-load(1).
@@ -217,7 +234,21 @@ $ cp install/share/brep/etc/brep-module.conf config/
$ edit config/brep-module.conf # Adjust default values if required.
To enable the build2 build bot controller functionality you will need to set
-the build-config option in brep-module.conf.
+the build-config option in brep-module.conf. To also enable the build
+artifacts upload functionality you will need to specify the upload-data
+directory for the desired upload types in brep-module.conf. For example, for
+generated binary distribution packages it can be as follows:
+
+upload-data bindist=/home/brep/bindist-data
+
+Note that this directory must exist and have read, write, and execute
+permissions granted to the www-data user. This, for example, can be achieved
+with the following commands:
+
+$ mkdir /home/brep/bindist-data
+$ setfacl -m g:www-data:rwx /home/brep/bindist-data
+
+For sample upload handler implementations see brep/handler/upload/.
To enable the package submission functionality you will need to specify the
submit-data and submit-temp directories in brep-module.conf. Note that these
@@ -261,75 +292,8 @@ $ edit config/ci.xhtml # Add custom form fields, adjust CSS style, etc.
For sample CI request handler implementations see brep/handler/ci/.
Here we assume you have setup an appropriate Apache2 virtual server. Open the
-corresponding Apache2 .conf file and add the following inside VirtualHost (you
-can also find this fragment in install/share/brep/etc/brep-apache2.conf):
-
- # Load the brep module.
- #
- <IfModule !brep_module>
- LoadModule brep_module /home/brep/install/libexec/brep/mod_brep.so
- </IfModule>
-
- # Repository email. This email is used for the From: header in emails
- # send by brep (for example, build failure notifications).
- #
- brep-email admin@example.org
-
- # Repository host. It specifies the scheme and the host address (but
- # not the root path; see brep-root below) that will be used whenever
- # brep needs to construct an absolute URL to one of its locations (for
- # example, a link to a build log that is being send via email).
- #
- brep-host https://example.org
-
- # Repository root. This is the part of the URL between the host name
- # and the start of the repository. For example, root value /pkg means
- # the repository URL is http://example.org/pkg/. Specify / to use the
- # web server root (e.g., http://example.org/). If using a different
- # repository root, don't forget to also change Location and Alias
- # directives below.
- #
- brep-root /pkg
-
- <Location "/pkg">
- SetHandler brep
-
- <IfModule dir_module>
- DirectoryIndex disabled
- DirectorySlash Off
- </IfModule>
- </Location>
-
- # Brep module configuration. If you prefer, you can paste the contents
- # of this file here. However, you will need to prefix every option with
- # 'brep-'.
- #
- brep-conf /home/brep/config/brep-module.conf
-
- # Static brep content (CSS files).
- #
- <IfModule !alias_module>
- Error "mod_alias is not enabled"
- </IfModule>
-
- # Note: trailing slashes are important!
- #
- Alias /pkg/@/ /home/brep/install/share/brep/www/
-
- <Directory "/home/brep/install/share/brep/www">
- Require all granted
- </Directory>
-
- # You can also serve the repository files from the repository root.
- # For example:
- #
- # http://example.org/pkg/1/... -> /path/to/repo/1/...
- #
- #AliasMatch ^/pkg/(\d+)/(.+) /path/to/repo/$1/$2
- #
- #<Directory "/path/to/repo">
- # Require all granted
- #</Directory>
+corresponding Apache2 .conf file and add the contents of
+brep/etc/brep-apache2.conf into the <VirtualHost> section.
The output content types of the brep module are application/xhtml+xml,
text/manifest and text/plain. If you would like to make sure they get
@@ -384,47 +348,71 @@ $ cd install/share/brep/www/
$ for i in *.scss; do sassc -s compressed $i `basename -s .scss $i`.css; done
-8. Setup Periodic Loader and Cleaner Execution
+8. Setup Periodic Loader, Cleaner, and Monitor Execution
Initially this guide suggested using systemd user session support to run the
-loader and the cleaner. However, the current state of user sessions has one
-major drawback: they are not started/attached-to when logging in with su -l
-(see Debian bug #813789 for details). This limitation makes them unusable in
-our setup. If you still would like to use systemd to run the loader and the
-cleaner, then you can set it up as a system-wide service which runs the
-utilities as the brep user/group. Otherwise, a cron job is a natural choice.
+loader, cleaner, and monitor. However, the current state of user sessions has
+one major drawback: they are not started/attached-to when logging in with su
+-l (see Debian bug #813789 for details). This limitation makes them unusable
+in our setup. If you still would like to use systemd to run the utilities,
+then you can set it up as a system-wide service which runs them as the brep
+user/group. Otherwise, a cron job is a natural choice.
Note that the builds cleaner execution is optional and is only required if the
build2 build bot functionality is enabled (see the build bot documentation for
-details). If it is disabled in you setup, then skip the cleaner-related
-parts in the subsequent subsections.
+details). If it is disabled in you setup, then skip the cleaner-related parts
+in the subsequent subsections.
+
+If the build artifacts upload functionality is enabled in addition to the
+build2 build bot functionality you most likely will want to additionally setup
+the cleanup of the outdated build artifacts. For example, for binary
+distribution package uploads handled by brep-upload-bindist the cleanup needs
+to be performed by periodic execution of brep-upload-bindist-clean script.
+Note that the directory where the uploads are saved to must exist and have
+read, write, and execute permissions granted to the brep user. This, for
+example, can be achieved with the following commands:
+
+# mkdir /var/bindist
+# chown www-data:www-data /var/bindist
+# setfacl -m u:brep:rwx /var/bindist
+# setfacl -dm u:brep:rwx /var/bindist
If the CI request functionality is enabled you most likely will want to
additionally setup the tenants cleanup.
+The monitor execution is also optional and currently only makes sense if the
+build2 build bot functionality is enabled. Note that you may need to replace
+the public toolchain name argument in the monitor utility command with a real
+list of toolchain names (and optionally versions) used in the brep build
+infrastructure.
+
-8.a Setup Periodic Loader and Cleaner Execution with cron
+8.a Setup Periodic Loader, Cleaner, and Monitor Execution with cron
-The following crontab entries will execute the loader every five minutes
-and the tenants and builds cleaners once a day at midnight:
+The following crontab entries will execute the loader every five minutes, the
+tenants, builds, and binary distribution cleaners once a day at midnight, and
+the monitor every hour (all shifted by a few minutes in order not to clash
+with other jobs):
$ crontab -l
MAILTO=<brep-admin-email>
PATH=/usr/local/bin:/bin:/usr/bin
*/5 * * * * $HOME/install/bin/brep-load $HOME/config/loadtab
-0 0 * * * $HOME/install/bin/brep-clean tenants 240
-0 0 * * * $HOME/install/bin/brep-clean builds $HOME/config/buildtab
+1 0 * * * $HOME/install/bin/brep-clean tenants 240
+2 0 * * * $HOME/install/bin/brep-clean builds $HOME/config/buildtab
+3 0 * * * $HOME/install/bin/brep-upload-bindist-clean /var/bindist 2880
+4 * * * * $HOME/install/bin/brep-monitor --report-timeout 86400 --clean $HOME/config/brep-module.conf public
^D
Note that here we assume that bpkg (which is executed by brep-load) is in one
of the PATH's directories (usually /usr/local/bin).
-8.b Setup Periodic Loader and Cleaner Execution with systemd
+8.b Setup Periodic Loader, Cleaner, and Monitor Execution with systemd
In this version we will use the systemd user session to periodically run the
-loader and the cleaner as the brep user. If your installation doesn't use
-systemd, then a cron job would be a natural alternative (see above).
+loader, cleaner, and monitor as the brep user. If your installation doesn't
+use systemd, then a cron job would be a natural alternative (see above).
As the first step, make sure systemd user sessions support is working for the
brep user:
@@ -443,6 +431,7 @@ $ sudo loginctl enable-linger brep
$ mkdir -p .config/systemd/user
$ cp install/share/brep/etc/systemd/brep-load.* .config/systemd/user/
$ cp install/share/brep/etc/systemd/brep-clean.* .config/systemd/user/
+$ cp install/share/brep/etc/systemd/brep-monitor.* .config/systemd/user/
Start the service to make sure there are no issues:
@@ -452,16 +441,21 @@ $ journalctl
$ systemctl --user start brep-clean.service
$ journalctl
+$ systemctl --user start brep-monitor.service
+$ journalctl
+
Start the timers and monitor them to make sure they fire:
$ systemctl --user start brep-load.timer
$ systemctl --user start brep-clean.timer
+$ systemctl --user start brep-monitor.timer
$ journalctl -f
If everything looks good, enable the timer to be started at boot time:
$ systemctl --user enable brep-load.timer
$ systemctl --user enable brep-clean.timer
+$ systemctl --user enable brep-monitor.timer
9. Upgrade Procedure
@@ -483,18 +477,20 @@ $ cd brep
$ bpkg fetch
$ bpkg build brep
-If you are using a systemd-based setup, then stop and disable the loader and
-the cleaner:
+If you are using a systemd-based setup, then stop and disable the loader,
+cleaner, and monitor:
$ systemctl --user disable --now brep-load.timer
$ systemctl --user disable --now brep-clean.timer
+$ systemctl --user disable --now brep-monitor.timer
$ systemctl --user stop brep-load.service
$ systemctl --user stop brep-clean.service
+$ systemctl --user stop brep-monitor.service
If you are using a cron-based setup, then it is not worth it commenting out the
-job entries. If the new version of the loader or the cleaner gets executed
-before or during the migration, then it will fail and you will get an email
-with the diagnostics. Other than that, it should be harmless.
+job entries. If the new version of the brep utilities gets executed before or
+during the migration, then it will fail and you will get an email with the
+diagnostics. Other than that, it should be harmless.
Stop apache:
@@ -510,7 +506,7 @@ Review brep-module.conf changes that may need to be merged:
$ diff -u install/share/brep/etc/brep-module.conf config/brep-module.conf
-Migrate database schemes:
+Migrate database schemas:
$ install/bin/brep-migrate package
$ install/bin/brep-migrate build
@@ -521,17 +517,20 @@ is not possible), then one way to do it would be:
$ psql -d brep_package -c 'DROP OWNED BY brep'
$ psql -d brep_build -c 'DROP OWNED BY brep'
-If using systemd, then start and enable the loader and the cleaner:
+If using systemd, then start and enable the loader, cleaner, and monitor:
$ systemctl --user start brep-load.service
$ systemctl --user status brep-load.service
$ systemctl --user start brep-clean.service
$ systemctl --user status brep-clean.service
+$ systemctl --user start brep-monitor.service
+$ systemctl --user status brep-monitor.service
If everything looks good, enable periodic execution:
$ systemctl --user enable --now brep-load.timer
$ systemctl --user enable --now brep-clean.timer
+$ systemctl --user enable --now brep-monitor.timer
If using cron, then simply wait for the next run.
diff --git a/INSTALL-CI-DEV b/INSTALL-CI-DEV
new file mode 100644
index 0000000..a80b727
--- /dev/null
+++ b/INSTALL-CI-DEV
@@ -0,0 +1,131 @@
+This guide shows how to configure the brep module for serving the CI and
+build2 build bot requests and how to smoke-test it.
+
+Note that during the testing both the user and CI submission handler (executed
+by the brep module) will run the build2 toolchain utilities. Thus, the user
+needs to arrange the toolchain availability for her and for the user the
+Apache2 process runs under. The easiest, would be to install the toolchain
+into the system using, for example, the build2-install-*-a.0-stage.sh script
+(can be downloaded from https://stage.build2.org/0/). If the being developed
+brep module is not compatible with the staged toolchain, then installing the
+development version of the toolchain may be required.
+
+In the below instructions replace <BREP-SRC-ROOT>, <BREP-OUT-ROOT>, and <HOME>
+with the actual absolute paths of the brep source, brep output, and the user
+home directories. Replace <HOST> with the actual hostname of the local brep
+repository instance.
+
+Here we assume that the brep instance is already configured according to the
+instructions in the INSTALL-DEV file. Now, the instance needs to additionally
+be configured as the build2 build bot controller and the CI request service,
+as it is described in the INSTALL file. This, in particular, requires to
+specify the build-config and the number of ci-* configuration options in the
+brep module configuration file. For example:
+
+$ mkdir ~/brep
+$ cd ~/brep
+$ mkdir ci-data config
+$ setfacl -m g:www-data:rwx ci-data
+$ cd config
+$ cp <BREP-SRC-ROOT>/etc/brep-module.conf .
+
+Edit brep-module.conf:
+
+- Uncomment the Builds=?builds menu.
+- Set the build-config option as <HOME>/brep/config/buildtab.
+- Set the ci-data option as <HOME>/brep/ci-data.
+- Set the ci-handler option as <BREP-OUT-ROOT>/brep/handler/ci/brep-ci-load.
+
+- Add the following options:
+
+ci-handler-argument --result-url
+ci-handler-argument http://<HOST>
+ci-handler-argument <BREP-OUT-ROOT>/load/brep-load
+
+Create the buildtab file:
+
+$ cat <<EOF >buildtab
+linux_debian_12*-gcc_13.1 linux_debian_12-gcc_13.1 x86_64-linux-gnu "all default"
+linux_debian_12*-gcc_13.1 linux_debian_12-gcc_13.1-O3 x86_64-linux-gnu "all default" config.cc.coptions="-O3"
+EOF
+
+Point the brep module to the newly created configuration file:
+
+$ sudo systemctl stop apache2
+
+Open the corresponding Apache2 .conf file and change the brep-conf directive
+to refer to <HOME>/brep/config/brep-module.conf.
+
+$ sudo systemctl start apache2
+$ sudo systemctl status apache2
+
+Submit a package for CI, for example, foo/1.0.0:
+
+$ cd ~/brep
+$ git clone https://.../foo
+$ cd foo
+$ bdep init -C @cfg --
+$ bdep ci --server http://<HOST>
+
+Verify that the CI request is successfully submitted by opening the link
+contained in the bdep-ci's stderr. The submitted package should be present on
+the Packages page.
+
+Send the task request query on the behalf of the build2 build bot agent, for
+example:
+
+$ cd ~/brep
+$ cat <<EOF >task-request.manifest
+: 1
+agent: bot
+toolchain-name: dev
+toolchain-version: 0.17.0-a.1
+
+:
+id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+name: linux_debian_12-gcc_13.1
+summary: Linux Debian 12 GCC 13.1
+EOF
+
+$ cat task-request.manifest | \
+ curl -s -S --data-binary @- \
+ --header 'Content-Type: text/manifest' \
+ --include "http://<HOST>/?build-task"
+
+Stash the session and result-url manifest values contained in the curl's
+stdout. We will refer them as <SESSION> and <RESULT-URL> down the road.
+
+Verify that the CI task is successfully created by clicking the 'Builds' link
+in the menu of the previously opened brep page. A single package build in the
+building state should be present on the Builds page.
+
+Send the result request query on the behalf of the build2 build bot agent:
+
+$ cat <<EOF >result-request.manifest
+: 1
+session: <SESSION>
+agent-checksum: 1
+:
+name: foo
+version: 1.0.0
+status: success
+EOF
+
+$ cat result-request.manifest | \
+ curl -s -S --data-binary @- \
+ --header 'Content-Type: text/manifest' \
+ --include <RESULT-URL>
+
+Refresh the Builds page and make sure that the build is now in the built state
+(the 'success' status is printed in the result field).
+
+Re-submit the task-request.manifest file, refresh the Builds page, and make
+sure that the second package build appears on the page in the building state.
+Edit the session value in the result-request.manifest, re-submit it to the new
+result URL, refresh the Builds page, and make sure that the latest build is
+now in the built state as well.
+
+You can also track the brep objects state transitions in the database. For
+example, by executing the following query before/after each curl command:
+
+$ psql -d brep_build -c 'select * from build_tenant'
diff --git a/INSTALL-DEV b/INSTALL-DEV
index 101d9d7..8ebc5a3 100644
--- a/INSTALL-DEV
+++ b/INSTALL-DEV
@@ -55,6 +55,17 @@ CREATE USER "www-data" INHERIT IN ROLE <user>;
CREATE USER "brep-build" INHERIT IN ROLE <user> PASSWORD '-';
+Note that starting with PostgreSQL 15 only the database owner can create the
+objects in the public schema by default. Thus, if the PostgreSQL version is 15
+or above, then all the privileges on this schema in the created databases need
+to be granted explicitly by the postgres user to <user>:
+
+\c brep_package
+GRANT ALL PRIVILEGES ON SCHEMA public TO <user>;
+
+\c brep_build
+GRANT ALL PRIVILEGES ON SCHEMA public TO <user>;
+
Exit psql (^D), then make sure the logins work:
$ psql -d brep_package
@@ -72,7 +83,7 @@ CREATE EXTENSION postgres_fdw;
CREATE SERVER package_server
FOREIGN DATA WRAPPER postgres_fdw
- OPTIONS (dbname 'brep_package', updatable 'false');
+ OPTIONS (dbname 'brep_package', updatable 'true');
GRANT USAGE ON FOREIGN SERVER package_server to <user>;
@@ -113,7 +124,7 @@ CREATE EXTENSION citext;
Exit psql (^D)
-2. Create Database Schemes and Load the Repository
+2. Create Database Schemas and Load the Repository
All the commands are executed from brep project root.
@@ -205,7 +216,7 @@ $ sudo tail -f /var/log/apache2/error.log
4. Reloading During Development
-To do a "complete reload" (i.e., recreate database schemes, load the repository
+To do a "complete reload" (i.e., recreate database schemas, load the repository
data, and reload the Apache2 plugin), execute the following from brep/:
$ migrate/brep-migrate --recreate package
diff --git a/INSTALL-PROXY b/INSTALL-PROXY
new file mode 100644
index 0000000..418846a
--- /dev/null
+++ b/INSTALL-PROXY
@@ -0,0 +1,136 @@
+This guide shows how to configure the Apache2-based HTTP proxy server for
+proxying HTTP(S) requests and caching the responses.
+
+Note that for security reasons most clients (curl, wget, etc) perform HTTPS
+requests via HTTP proxies by establishing a tunnel using the HTTP CONNECT
+method and encrypting all the communications, thus making the origin server's
+responses non-cacheable. This proxy setup uses the over-HTTP caching for cases
+when the HTTPS response caching is desirable and presumed safe (for example,
+signed repository manifests, checksum'ed package archives, etc., or the proxy
+is located inside a trusted, private network).
+
+Specifically, this setup interprets the requested HTTP URLs as HTTPS URLs by
+default, effectively replacing the http URL scheme with https. If desired, to
+also support proxying/caching of the HTTP URL requests, the proxy can be
+configured to either recognize certain hosts as HTTP-only or to recognize a
+custom HTTP header that can be sent by an HTTP client to prevent the
+http-to-https scheme conversion.
+
+In this guide commands that start with the # shell prompt are expected to be
+executed as root and those starting with $ -- as a regular user in their home
+directory. All the commands are provided for Debian, so you may need to adjust
+them to match your distribution/OS.
+
+1. Enable Apache2 Modules
+
+Here we assume you have the Apache2 server installed and running.
+
+Enable the following Apache2 modules used in the proxy setup:
+
+ rewrite
+ headers
+ ssl
+ proxy
+ proxy_http
+ cache
+ cache_disk
+
+These modules are commonly used and are likely to be installed together with
+the Apache2 server. After the modules are enabled, restart Apache2 and make
+sure that the server has started successfully. For example:
+
+# a2enmod rewrite # Enable the rewrite module.
+ ...
+# systemctl restart apache2
+# systemctl status apache2 # Verify started.
+
+To troubleshoot, see Apache logs.
+
+
+2. Setup Proxy in Apache2 Configuration File
+
+Create the directory for the proxy logs. For example:
+
+# mkdir -p /var/www/cache.lan/log
+
+Note that here and below we assume that the host name of the machine the
+Apache2 instance is running on is cache.lan.
+
+Create a separate <VirtualHost> section intended for proxying HTTP(S) requests
+and caching the responses in the Apache2 configuration file. Note that there
+is no single commonly used HTTP proxy port, thus you may want to use the port
+80 if it is not already assigned to some other virtual host. If you decide to
+use some other port, make sure the corresponding `Listen <port>` directive is
+present in the Apache2 configuration file.
+
+Inside <VirtualHost> replace DocumentRoot (and anything else related to the
+normal document serving) with the contents of brep/etc/proxy-apache2.conf and
+adjust CacheRoot (see below) as well as any other values if desired.
+
+<VirtualHost *:80>
+ LogLevel warn
+ ErrorLog /var/www/cache.lan/log/error.log
+ CustomLog /var/www/cache.lan/log/access.log combined
+
+ <contents of proxy-apache2.conf>
+
+</VirtualHost>
+
+We will assume that the default /var/cache/apache2/mod_cache_disk directory is
+specified for the CacheRoot directive. If that's not the case, then make sure
+the specified directory is writable by the user under which Apache2 is
+running, for example, by executing the following command:
+
+# setfacl -m g:www-data:rwx /path/to/proxy/cache
+
+Restart Apache2 and make sure that the server has started successfully.
+
+# systemctl restart apache2
+# systemctl status apache2 # Verify started.
+
+Make sure the proxy functions properly and caches the HTTP responses, for
+example:
+
+$ ls /var/cache/apache2/mod_cache_disk # Empty.
+$ curl --proxy http://cache.lan:80 http://www.example.com # Prints HTML.
+$ ls /var/cache/apache2/mod_cache_disk # Non-empty.
+
+To troubleshoot, see Apache logs.
+
+
+3. Setup Periodic Cache Cleanup
+
+The cache directory cleanup is performed with the htcacheclean utility
+(normally installed together with the Apache2 server) that you can run as a
+cron job or as a systemd service. If you are running a single Apache2-based
+cache on the host, the natural choice is to run it as a system-wide service
+customizing the apache-htcacheclean systemd unit configuration, if required.
+Specifically, you may want to change the max disk cache size limit and/or the
+cache root directory path, so it matches the CacheRoot Apache2 configuration
+directive value (see above). Run the following command to see the current
+cache cleaner service setup.
+
+# systemctl cat apache-htcacheclean
+
+The output may look as follows:
+
+...
+[Service]
+...
+Environment=HTCACHECLEAN_SIZE=300M
+Environment=HTCACHECLEAN_DAEMON_INTERVAL=120
+Environment=HTCACHECLEAN_PATH=/var/cache/apache2/mod_cache_disk
+Environment=HTCACHECLEAN_OPTIONS=-n
+EnvironmentFile=-/etc/default/apache-htcacheclean
+ExecStart=/usr/bin/htcacheclean -d $HTCACHECLEAN_DAEMON_INTERVAL -p $HTCACHECLEAN_PATH -l $HTCACHECLEAN_SIZE $HTCACHECLEAN_OPTIONS
+...
+
+To change the service configuration either use the `systemctl edit
+apache-htcacheclean` command or, as for the above example, edit the
+environment file (/etc/default/apache-htcacheclean).
+
+Restart the cache cleaner service and make sure that it is started
+successfully and the process arguments match the expectations.
+
+# systemctl restart apache-htcacheclean
+# systemctl status apache-htcacheclean # Verify process arguments.
diff --git a/LEGAL b/LEGAL
new file mode 100644
index 0000000..4ccebd9
--- /dev/null
+++ b/LEGAL
@@ -0,0 +1,6 @@
+This file contains additional (to the LICENSE file) notes and details on the
+copyright and/or license for the code.
+
+ODB is licensed under the MIT-compatible terms with a license exception:
+
+https://git.codesynthesis.com/cgit/odb/odb-etc/tree/license-exceptions/build2-odb-license-exception.txt
diff --git a/LICENSE b/LICENSE
index 215ce1d..5a25163 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,20 +1,21 @@
-Copyright (c) 2014-2019 Code Synthesis Ltd
+MIT License
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
+Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files).
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/NEWS b/NEWS
index 1d3a6e8..5382c22 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,56 @@
+Version 0.16.0
+
+ * Note: brep_build database schema migration from version 18 is unsupported.
+
+ * Support for build artifact upload.
+
+ * Support for *-package-config package manifest value functionality.
+
+ * Support for interrupted build results.
+
+ * Support for random package ordering when issuing build tasks.
+
+ * Support for package-description, package-description-{file,type} package
+ manifest values.
+
+Version 0.15.0
+
+ * Support for disabling build notification emails per toolchain.
+
+ * The submit-git and submit-pub handlers now deny submissions of older
+ package version revisions.
+
+ * Support for footnotes in cmark-gfm.
+
+Version 0.14.0
+
+ * Support for interactive CI.
+
+ * Support for soft and hard rebuilds.
+
+ * Support for build-time dependencies and the target/host configuration
+ split awareness.
+
+ * Initial support for private brep-as-VM setup (see etc/private/README).
+
+ * Build notifications are no longer sent if the build-email package manifest
+ value is unspecified.
+
+Version 0.13.0
+
+ * Support for the alternative package rebuild timeout.
+
+ The alternative rebuild timeout can be used to "pull" the rebuild window
+ to the specified time of day, for example, to optimize load and/or power
+ consumption of the build infrastructure (off-work hours, solar, off-peak
+ electricity tariffs, etc).
+
+ * New brep-monitor utility for monitoring for and reporting on brep state
+ issues. Currently it only reports build delays. See brep-monitor(1) for
+ details.
+
+ * Support for test-exclude task manifest value.
+
Version 0.12.0
* Support for specifying explicit environment names in buildtabs.
diff --git a/brep/handler/buildfile b/brep/handler/buildfile
index 3b9245b..cd11231 100644
--- a/brep/handler/buildfile
+++ b/brep/handler/buildfile
@@ -1,10 +1,10 @@
# file : brep/handler/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
import mods = libbutl.bash%bash{manifest-parser}
import mods += libbutl.bash%bash{manifest-serializer}
+import mods += bpkg-util%bash{package-archive}
-./: bash{handler} submit/ ci/
+./: bash{handler} submit/ ci/ upload/
bash{handler}: in{handler} $mods
diff --git a/brep/handler/ci/buildfile b/brep/handler/ci/buildfile
index 3ed6807..69234d6 100644
--- a/brep/handler/ci/buildfile
+++ b/brep/handler/ci/buildfile
@@ -1,5 +1,4 @@
# file : brep/handler/ci/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
./: exe{brep-ci-dir} exe{brep-ci-load}
diff --git a/brep/handler/ci/ci-dir.in b/brep/handler/ci/ci-dir.in
index 47387ea..58aa991 100644
--- a/brep/handler/ci/ci-dir.in
+++ b/brep/handler/ci/ci-dir.in
@@ -1,7 +1,6 @@
#!/usr/bin/env bash
# file : brep/handler/ci/ci-dir.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Simple package CI request handler with directory storage.
diff --git a/brep/handler/ci/ci-load.in b/brep/handler/ci/ci-load.in
index 915f9a6..3f04ea8 100644
--- a/brep/handler/ci/ci-load.in
+++ b/brep/handler/ci/ci-load.in
@@ -1,7 +1,6 @@
#!/usr/bin/env bash
# file : brep/handler/ci/ci-load.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Package CI request handler that loads the packages into the brep database.
@@ -26,7 +25,10 @@ verbose= #true
fetch_timeout=60
trap "{ exit 1; }" ERR
-set -o errtrace # Trap ERR in functions.
+set -o errtrace # Trap ERR in functions.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
@import brep/handler/handler@
@import brep/handler/ci/ci@
@@ -34,7 +36,7 @@ set -o errtrace # Trap ERR in functions.
# The handler's own options.
#
result_url=
-while [ $# -gt 0 ]; do
+while [[ "$#" -gt 0 ]]; do
case $1 in
--result-url)
shift
@@ -51,7 +53,7 @@ done
#
loader="$1"
-if [ -z "$loader" ]; then
+if [[ -z "$loader" ]]; then
error "$usage"
fi
@@ -61,7 +63,7 @@ shift
# options.
#
loader_options=()
-while [ $# -gt 1 ]; do
+while [[ "$#" -gt 1 ]]; do
loader_options+=("$1")
shift
done
@@ -70,11 +72,11 @@ done
#
data_dir="${1%/}"
-if [ -z "$data_dir" ]; then
+if [[ -z "$data_dir" ]]; then
error "$usage"
fi
-if [ ! -d "$data_dir" ]; then
+if [[ ! -d "$data_dir" ]]; then
error "'$data_dir' does not exist or is not a directory"
fi
@@ -85,8 +87,9 @@ reference="$(basename "$data_dir")"
#
manifest_parser_start "$data_dir/request.manifest"
-simulate=
repository=
+interactive=
+simulate=
# Package map. We first enter packages from the request manifest as keys and
# setting the values to true. Then we go through the repository package list
@@ -105,40 +108,58 @@ declare -A packages
#
spec=
+# Third party service information which, if specified, needs to be associated
+# with the being created tenant.
+#
+service_id=
+service_type=
+service_data=
+
while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
case "$n" in
- simulate) simulate="$v" ;;
- repository) repository="$v" ;;
+ repository) repository="$v" ;;
+ interactive) interactive="$v" ;;
+ simulate) simulate="$v" ;;
package)
packages["$v"]=true
- if [ -n "$spec" ]; then
+ if [[ -n "$spec" ]]; then
spec="$spec,"
fi
spec="$spec$v"
;;
+
+ service-id) service_id="$v" ;;
+ service-type) service_type="$v" ;;
+ service-data) service_data="$v" ;;
esac
done
manifest_parser_finish
-if [ -n "$spec" ]; then
+if [[ -n "$spec" ]]; then
spec="$spec@"
fi
spec="$spec$repository"
-if [ -z "$repository" ]; then
+if [[ -z "$repository" ]]; then
error "repository manifest value expected"
fi
-if [ -n "$simulate" -a "$simulate" != "success" ]; then
+if [[ -n "$simulate" && "$simulate" != "success" ]]; then
exit_with_manifest 400 "unrecognized simulation outcome '$simulate'"
fi
+# Use the generated reference if the tenant service id is not specified.
+#
+if [[ -n "$service_type" && -z "$service_id" ]]; then
+ service_id="$reference"
+fi
+
message_suffix=
-if [ -n "$result_url" ]; then
+if [[ -n "$result_url" ]]; then
message_suffix=": $result_url/@$reference" # Append the tenant id.
fi
@@ -147,7 +168,7 @@ fi
# Note that we can't assume a real repository URL is specified if simulating
# so trying to query the repository info is not a good idea.
#
-if [ -n "$simulate" ]; then
+if [[ -n "$simulate" ]]; then
run rm -r "$data_dir"
trace "CI request for '$spec' is simulated$message_suffix"
@@ -189,9 +210,9 @@ manifest_values=()
manifest_version=
more=true
-while [ "$more" ]; do
+while [[ "$more" ]]; do
- if [ -n "$manifest_version" ]; then
+ if [[ -n "$manifest_version" ]]; then
manifest_names=("")
manifest_values=("$manifest_version")
fi
@@ -214,35 +235,32 @@ while [ "$more" ]; do
manifest_names+=("$n")
manifest_values+=("$v")
-
done
# Reduce the first manifest case.
#
- if [ ${#manifest_names[@]} -eq 0 ]; then
+ if [[ "${#manifest_names[@]}" -eq 0 ]]; then
continue
fi
# Add or filter out the manifest, if present.
#
- if [ ${#packages[@]} -ne 0 ]; then
-
- if [[ -v packages["$name"] ]]; then
+ if [[ "${#packages[@]}" -ne 0 ]]; then
+ if [[ -v "packages[$name]" ]]; then
packages["$name"]=
packages["$name/$version"]= # Clear it either, as may also be present.
- elif [[ -v packages["$name/$version"] ]]; then
+ elif [[ -v "packages[$name/$version]" ]]; then
packages["$name/$version"]=
else
continue # Skip.
fi
-
fi
packages_manifest_names+=("${manifest_names[@]}")
packages_manifest_values+=("${manifest_values[@]}")
- if [ -z "$display_name" ]; then
- if [ -n "$project" ]; then
+ if [[ -z "$display_name" ]]; then
+ if [[ -n "$project" ]]; then
display_name="$project"
else
display_name="$name"
@@ -256,7 +274,7 @@ manifest_parser_finish
# the repository.
#
for p in "${!packages[@]}"; do
- if [ "${packages[$p]}" ]; then
+ if [[ "${packages[$p]}" ]]; then
exit_with_manifest 422 "unknown package $p"
fi
done
@@ -264,7 +282,7 @@ done
# Verify that the repository is not empty. Failed that, the repository display
# name wouldn't be set.
#
-if [ -z "$display_name" ]; then
+if [[ -z "$display_name" ]]; then
exit_with_manifest 422 "no packages in repository"
fi
@@ -276,7 +294,7 @@ run mv "$cache_dir/packages.manifest" "$cache_dir/packages.manifest.orig"
#
manifest_serializer_start "$cache_dir/packages.manifest"
-for ((i=0; i <= ${#packages_manifest_names[@]}; ++i)); do
+for ((i=0; i != "${#packages_manifest_names[@]}"; ++i)); do
manifest_serialize "${packages_manifest_names[$i]}" \
"${packages_manifest_values[$i]}"
done
@@ -290,7 +308,7 @@ run echo "$repository $display_name cache:cache" >"$loadtab"
# Apply overrides, if uploaded.
#
-if [ -f "$data_dir/overrides.manifest" ]; then
+if [[ -f "$data_dir/overrides.manifest" ]]; then
loader_options+=(--overrides-file "$data_dir/overrides.manifest")
fi
@@ -299,6 +317,22 @@ fi
#
loader_options+=(--force --shallow --tenant "$reference")
+# Build the packages interactively, if requested.
+#
+if [[ -n "$interactive" ]]; then
+ loader_options+=(--interactive "$interactive")
+fi
+
+# Pass the tenant service information, if specified, to the loader.
+#
+if [[ -n "$service_id" ]]; then
+ loader_options+=(--service-id "$service_id" --service-type "$service_type")
+
+ if [[ -n "$service_data" ]]; then
+ loader_options+=(--service-data "$service_data")
+ fi
+fi
+
run "$loader" "${loader_options[@]}" "$loadtab"
# Remove the no longer needed CI request data directory.
diff --git a/brep/handler/ci/ci.bash.in b/brep/handler/ci/ci.bash.in
index c188ab9..4ed5fab 100644
--- a/brep/handler/ci/ci.bash.in
+++ b/brep/handler/ci/ci.bash.in
@@ -1,5 +1,4 @@
# file : brep/handler/ci/ci.bash.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Utility functions useful for implementing CI request handlers.
diff --git a/brep/handler/handler.bash.in b/brep/handler/handler.bash.in
index 2e66afc..d9e7eaa 100644
--- a/brep/handler/handler.bash.in
+++ b/brep/handler/handler.bash.in
@@ -1,5 +1,4 @@
# file : brep/handler/handler.bash.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Utility functions useful for implementing request handlers.
@@ -10,8 +9,11 @@ else
brep_handler=true
fi
-@import libbutl/manifest-parser@
-@import libbutl/manifest-serializer@
+@import libbutl.bash/manifest-parser@
+@import libbutl.bash/manifest-serializer@
+
+bpkg_util_bpkg=bpkg
+@import bpkg-util/package-archive@
# Diagnostics.
#
@@ -52,7 +54,7 @@ function info () # <severity> <text>
ts=
fi
- echo "[$ts] [brep:$severity] [ref $info_ref] [$info_self]: $*" 1>&2;
+ echo "[$ts] [brep:$severity] [ref $info_ref] [$info_self]: $*" 1>&2
}
function error () { info "error" "$*"; exit 1; }
@@ -149,3 +151,7 @@ function manifest_serialize () # <name> <value>
# trace "$1: $2"
printf "%s:%s\0" "$1" "$2" >&"$manifest_serializer_ifd"
}
+
+function pkg_verify_archive () { bpkg_util_pkg_verify_archive "$@"; }
+function pkg_find_archives () { bpkg_util_pkg_find_archives "$@"; }
+function pkg_find_archive () { bpkg_util_pkg_find_archive "$@"; }
diff --git a/brep/handler/submit/.gitignore b/brep/handler/submit/.gitignore
index cbbd541..098bf75 100644
--- a/brep/handler/submit/.gitignore
+++ b/brep/handler/submit/.gitignore
@@ -1,2 +1,3 @@
brep-submit-dir
brep-submit-git
+brep-submit-pub
diff --git a/brep/handler/submit/buildfile b/brep/handler/submit/buildfile
index 7951a46..1747c64 100644
--- a/brep/handler/submit/buildfile
+++ b/brep/handler/submit/buildfile
@@ -1,8 +1,7 @@
# file : brep/handler/submit/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
-./: exe{brep-submit-dir} exe{brep-submit-git}
+./: exe{brep-submit-dir} exe{brep-submit-git} exe{brep-submit-pub}
include ../
@@ -11,5 +10,7 @@ exe{brep-submit-dir}: in{submit-dir} bash{submit} ../bash{handler}
exe{brep-submit-git}: in{submit-git} \
bash{submit-git} bash{submit} ../bash{handler}
+exe{brep-submit-pub}: in{submit-pub} bash{submit} ../bash{handler}
+
bash{submit}: in{submit} ../bash{handler}
bash{submit-git}: in{submit-git} bash{submit} ../bash{handler}
diff --git a/brep/handler/submit/submit-dir.in b/brep/handler/submit/submit-dir.in
index ae0dcbd..b28ab38 100644
--- a/brep/handler/submit/submit-dir.in
+++ b/brep/handler/submit/submit-dir.in
@@ -1,7 +1,6 @@
#!/usr/bin/env bash
# file : brep/handler/submit/submit-dir.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Simple package submission handler with directory storage.
@@ -67,20 +66,17 @@ fi
m="$data_dir/package.manifest"
extract_package_manifest "$data_dir/$archive" "$m"
-# Parse the package manifest and obtain the package name, version, and
-# project.
+# Parse the package manifest and obtain the package name and version.
#
manifest_parser_start "$m"
name=
version=
-project=
while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
case "$n" in
name) name="$v" ;;
version) version="$v" ;;
- project) project="$v" ;;
esac
done
@@ -94,10 +90,6 @@ if [ -z "$version" ]; then
error "version manifest value expected"
fi
-if [ -z "$project" ]; then
- project="$name"
-fi
-
if [ -n "$simulate" ]; then
run rm -r "$data_dir"
trace "package submission is simulated: $name/$version"
diff --git a/brep/handler/submit/submit-git.bash.in b/brep/handler/submit/submit-git.bash.in
index 56cce33..cf7300d 100644
--- a/brep/handler/submit/submit-git.bash.in
+++ b/brep/handler/submit/submit-git.bash.in
@@ -1,5 +1,4 @@
# file : brep/handler/submit/submit-git.bash.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Utility functions for the submit-git handler.
@@ -60,6 +59,10 @@ function owners_dir () # <repo-dir>
# Check if a repository already contains the package. Respond with the
# 'duplicate submission' result manifest and exit if that's the case.
#
+# Also check if the repository contains newer revision of this package
+# version. Respond with the 'newer revision is present' result manifest and
+# exit if that's the case.
+#
function check_package_duplicate () # <name> <version> <repo-dir>
{
trace_func "$@"
@@ -73,22 +76,54 @@ function check_package_duplicate () # <name> <version> <repo-dir>
run source "$rep/submit.config.bash"
- # Check for duplicate package in all sections. Use <name>-<version>.*
- # without .tar.gz in case we want to support more archive types later.
+ local rev
+ rev="$(version_revision "$ver")"
+
+ # Check for duplicate package and its newer revisions in all sections. Use
+ # <name>-<version>.* without .tar.gz in case we want to support more archive
+ # types later.
#
local s
for s in "${!sections[@]}"; do
local d="$rep/${sections[$s]}"
- if [ -d "$d" ]; then
- local f
- f="$(run find "$d" -name "$nam-$ver.*")"
+ # Check for duplicate.
+ #
+ local p
+ run pkg_find_archive "$nam-$ver.*" "$d" | readarray -t p
+
+ if [ "${#p[@]}" -ne 0 ]; then
+ local n="${p[1]}"
+ local v="${p[2]}"
- if [ -n "$f" ]; then
- trace "found: $f"
+ trace "found: $n/$v in ${p[0]}"
+
+ if [ "$n" == "$nam" ]; then
exit_with_manifest 422 "duplicate submission"
+ else
+ exit_with_manifest 422 "submission conflicts with $n/$v"
fi
fi
+
+ # Check for newer revision.
+ #
+ local arcs
+ run pkg_find_archives "$nam" "$ver*" "$d" | readarray -t arcs
+
+ local f
+ for f in "${arcs[@]}"; do
+ local p
+ pkg_verify_archive "$f" | readarray -t p
+
+ local v="${p[1]}"
+
+ local rv
+ rv="$(version_revision "$v")"
+
+ if [ "$rv" -gt "$rev" ]; then
+ exit_with_manifest 422 "newer revision $nam/$v is present"
+ fi
+ done
done
}
@@ -164,6 +199,7 @@ function auth_project () # <project> <control> <repo-dir>
local r="unknown"
local m="$d/$prj/project-owner.manifest"
+ local info=
# If the project owner manifest exists then parse it and try to authenticate
# the submitter as the project owner.
@@ -176,16 +212,31 @@ function auth_project () # <project> <control> <repo-dir>
local n v
while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
- if [[ "$n" == "control" && "$ctl" == "$v"* ]]; then
- r="project"
- break
+ if [[ "$n" == "control" ]]; then
+ if [[ "$ctl" == "$v"* ]]; then
+ r="project"
+ break
+ fi
+
+ # If the control URLs don't match, then compare them case-
+ # insensitively, converting them to the lower case. If they match
+ # case-insensitively, then still fail the authentication but provide
+ # additional information in the manifest message value.
+ #
+ if [[ "${ctl,,}" == "${v,,}"* ]]; then
+ info="
+ info: control repository URL differs only in character case
+ info: submitted URL: $ctl
+ info: project owner's URL: $v
+ info: consider using --control to specify exact URL"
+ fi
fi
done
manifest_parser_finish
if [ "$r" != "project" ]; then
- exit_with_manifest 401 "project owner authentication failed"
+ exit_with_manifest 401 "project owner authentication failed$info"
fi
fi
@@ -211,7 +262,8 @@ function auth_package () # <project> <package> <control> <repo-dir>
local prj="$1"
local pkg="$2"
- local ctl="${3%.git}" # Strip the potential .git extension.
+ local ctl="${3%.git}" # For comparison strip the potential .git extension.
+ local ctl_orig="$3" # For diagnostics use the original URL.
local rep="$4"
local d
@@ -228,6 +280,7 @@ function auth_package () # <project> <package> <control> <repo-dir>
local r="unknown"
local m="$d/$prj/$pkg/package-owner.manifest"
+ local info=
# If the package owner manifest exists then parse it and try to authenticate
# the submitter as the package owner.
@@ -242,16 +295,31 @@ function auth_package () # <project> <package> <control> <repo-dir>
#
local n v
while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
- if [ "$n" == "control" -a "${v%.git}" == "$ctl" ]; then
- r="package"
- break
+ if [ "$n" == "control" ]; then
+ local u="${v%.git}"
+
+ if [ "$u" == "$ctl" ]; then
+ r="package"
+ break
+ fi
+
+ # If the control URLs don't match, then compare them case-
+ # insensitively (see auth_project() for details).
+ #
+ if [ "${u,,}" == "${ctl,,}" ]; then
+ info="
+ info: control repository URL differs only in character case
+ info: submitted URL: $ctl_orig
+ info: package owner's URL: $v
+ info: consider using --control to specify exact URL"
+ fi
fi
done
manifest_parser_finish
if [ "$r" != "package" ]; then
- exit_with_manifest 401 "package owner authentication failed"
+ exit_with_manifest 401 "package owner authentication failed$info"
fi
fi
diff --git a/brep/handler/submit/submit-git.in b/brep/handler/submit/submit-git.in
index 8263efe..c882b84 100644
--- a/brep/handler/submit/submit-git.in
+++ b/brep/handler/submit/submit-git.in
@@ -1,7 +1,6 @@
#!/usr/bin/env bash
# file : brep/handler/submit/submit-git.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Package submission handler with git repository storage.
@@ -187,8 +186,10 @@ git_timeout=10
ref_lock_timeout=30
trap "{ exit 1; }" ERR
-set -o errtrace # Trap ERR in functions.
-set -o pipefail # Return the rightmost non-zero exit status in a pipeline.
+set -o errtrace # Trap in functions and subshells.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
@import brep/handler/handler@
@import brep/handler/submit/submit@
@@ -404,7 +405,7 @@ function git_add () # <repo-dir> <path>...
local d="$1"
shift
- run git -C "$d" add $gvo "$@" >&2
+ run git -C "$d" add --force $gvo "$@" >&2
}
# For now we make 10 re-tries to add the package and push to target. Push can
@@ -440,8 +441,12 @@ for i in {1..11}; do
trace "+ exec {fd}<$l"
exec {fd}<"$l"
+ # Note that on the locking failure we don't suggest the user to try again,
+ # since the client program may suggest to re-try later for all server
+ # errors (as bdep-publish(1) does).
+ #
if ! run flock -w "$ref_lock_timeout" "$fd"; then
- exit_with_manifest 503 "submission service temporarily unavailable"
+ exit_with_manifest 503 "submission service is busy"
fi
# Pull the reference repository.
@@ -558,7 +563,7 @@ for i in {1..11}; do
#
prj_man="$d/project-owner.manifest"
- if [ ! -f "$prj_man" ]; then
+ if [ ! -f "$prj_man" -a "$ref_auth" != "project" ]; then
run mkdir -p "$d" # Also creates the owners directory if not exist.
ctl="$(repository_base "$control")"
@@ -575,7 +580,7 @@ for i in {1..11}; do
# Now the package name.
#
d="$d/$name"
- run mkdir "$d"
+ run mkdir -p "$d" # Also creates the project directory if not exist.
pkg_man="$d/package-owner.manifest"
@@ -636,34 +641,21 @@ for i in {1..11}; do
exit_with_manifest 400 "unrecognized section '$section'"
fi
- # Strips the version revision part, if present.
- #
- v="$(sed -n -re 's%^(\+?[^+]+)(\+[0-9]+)?$%\1%p' <<<"$version")"
-
- # Make sure the section directory exists before we run find in it.
- #
- d="$tgt_dir/$s/$project"
- run mkdir -p "$d" # Create all the parent directories as well.
+ run pkg_find_archives "$name" "$version*" "$tgt_dir/$s" | readarray -t arcs
- # Go through the potentially matching archives (for example, for
- # foo-1.2.3+2: foo-1.2.3.tar.gz, foo-1.2.3+1.tar.gz, foo-1.2.30.tar.gz, etc)
- # and remove those that match exactly.
- #
- # Change CWD to the section directory to make sure that the found archive
- # paths don't contain spaces.
- #
- fs=($(run cd "$tgt_dir/$s" && run find -name "$name-$v*"))
-
- for f in "${fs[@]}"; do
- if [[ "$f" =~ ^\./[^/]+/"$name-$v"(\+[0-9]+)?\.[^/]+$ ]]; then
- run git -C "$tgt_dir" rm $gqo "$s/$f" >&2
- fi
+ for f in "${arcs[@]}"; do
+ run git -C "$tgt_dir" rm $gqo "${f#$tgt_dir/}" >&2
done
# Finally, add the package archive to the target repository.
#
- # We copy the archive rather than move it since we may need it for a re-try.
+ # Copy the archive rather than move it since we may need it for a re-try.
+ # Make sure the project directory exists before we copy the archive into it.
+ # Note that it was removed by git-rm if it became empty.
#
+ d="$tgt_dir/$s/$project"
+ run mkdir -p "$d" # Create all the parent directories as well.
+
a="$d/$archive"
run cp "$data_dir/$archive" "$a"
diff --git a/brep/handler/submit/submit-pub.in b/brep/handler/submit/submit-pub.in
new file mode 100644
index 0000000..42d478d
--- /dev/null
+++ b/brep/handler/submit/submit-pub.in
@@ -0,0 +1,435 @@
+#!/usr/bin/env bash
+
+# file : brep/handler/submit/submit-pub.in
+# license : MIT; see accompanying LICENSE file
+
+# Package submission handler with direct repository publishing.
+#
+# The overall idea behind this handler is to directly add the package to a
+# private/trusted (unsigned) pkg repository with a simple structure (no
+# sections). Upon successful execution of this handler no additional steps are
+# required.
+#
+# Specifically, the handler performs the following steps:
+#
+# - Lock the repository directory for the duration of the package submission.
+#
+# - Check for the package duplicate.
+#
+# - Create the new repository as a hardlink-copy of the current one.
+#
+# - Remove any package revisions, if present.
+#
+# - Validate and add the package archive to the new repository (with project
+# subdirectory).
+#
+# - Re-generate the new repository without signing.
+#
+# - Verify that the new repository is loadable into the brep package database.
+#
+# - Atomically switch the repository symlink to refer to the new repository.
+#
+# - Release the lock and remove the old repository.
+#
+# The repository argument (<repo>) should be an absolute path to a symbolic
+# link to the pkg repository directory, with the archive and manifest files
+# residing in its 1/ subdirectory. The base name of the <repo> path is used
+# as a base for new repository directories.
+#
+# Unless the handler is called for testing, the loader program's absolute path
+# and options should be specified so that the handler can verify that the
+# package is loadable into the brep package database (this makes sure the
+# package dependencies are resolvable, etc).
+#
+# Notes:
+#
+# - Filesystem entries that exist or are created in the data directory:
+#
+# <pkg>-<ver>.tar.gz saved by brep (could be other archives in the future)
+# request.manifest created by brep
+# package.manifest extracted by the handler
+# loadtab created by the handler
+# result.manifest saved by brep
+#
+# Options:
+#
+# --user <name>
+#
+# Re-execute itself under the specified user.
+#
+# Note that the repository can also be modified manually (e.g., to remove
+# packages). This option is normally specified to make sure that all the
+# repository filesystem entries belong to a single user, which, in
+# particular, can simplify their permissions handling (avoid extra ACLs,
+# etc).
+#
+# Note that if this option is specified, then current user (normally the
+# user under which Apache2 is running) must be allowed to execute sudo
+# without a password, which is only recommended in private/trusted
+# environments.
+#
+# --result-url <url>
+#
+# Result URL base for the response. If specified, the handler appends the
+# <package>/<version> to this value and includes the resulting URL in the
+# response message.
+#
+usage="usage: $0 [<options>] [<loader-path> <loader-options>] <repo> <dir>"
+
+# Diagnostics.
+#
+verbose= #true
+
+# The repository lock timeout (seconds).
+#
+rep_lock_timeout=60
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions and subshells.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
+
+@import brep/handler/handler@
+@import brep/handler/submit/submit@
+
+# Parse the command line options and, while at it, compose the arguments array
+# for potential re-execution under a different user.
+#
+user=
+result_url=
+
+scr_exe="$(realpath "${BASH_SOURCE[0]}")"
+scr_dir="$(dirname "$scr_exe")"
+
+args=("$scr_exe")
+
+while [ "$#" -gt 0 ]; do
+ case $1 in
+ --user)
+ shift
+ user="$1"
+ shift
+ ;;
+ --result-url)
+ args+=("$1")
+ shift
+ result_url="${1%/}"
+ args+=("$1")
+ shift
+ ;;
+ *)
+ break; # The end of options is encountered.
+ ;;
+ esac
+done
+
+loader_args=() # The loader path and options.
+
+# Assume all the remaining arguments except for the last two (repository
+# symlink and data directory) as the loader program path and arguments.
+#
+while [ "$#" -gt 2 ]; do
+ loader_args+=("$1")
+ args+=("$1")
+ shift
+done
+
+if [ "$#" -ne 2 ]; then
+ error "$usage"
+fi
+
+# pkg repository symlink.
+#
+repo="${1%/}"
+shift
+
+if [ -z "$repo" ]; then
+ error "$usage"
+fi
+
+# Submission data directory.
+#
+data_dir="${1%/}"
+shift
+
+if [ -z "$data_dir" ]; then
+ error "$usage"
+fi
+
+# Re-execute itself under a different user, if requested.
+#
+if [ -n "$user" ]; then
+ args+=("$repo" "$data_dir")
+
+ # Compose the arguments string to pass to the su program, quoting empty
+ # arguments as well as those that contain spaces. Note that here, for
+ # simplicity, we assume that the arguments may not contain '"'.
+ #
+ as=
+ for a in "${args[@]}"; do
+ if [ -z "$a" -o -z "${a##* *}" ]; then
+ a="\"$a\""
+ fi
+ if [ -n "$as" ]; then
+ a=" $a"
+ fi
+ as="$as$a"
+ done
+
+ run exec sudo --non-interactive su -l "$user" -c "$as"
+fi
+
+# Check path presence (do it after user switch for permissions).
+#
+if [ ! -L "$repo" ]; then
+ error "'$repo' does not exist or is not a symlink"
+fi
+
+if [ ! -d "$data_dir" ]; then
+ error "'$data_dir' does not exist or is not a directory"
+fi
+
+reference="$(basename "$data_dir")"
+
+# Parse the submission request manifest and obtain the archive path as well as
+# the simulate value.
+#
+manifest_parser_start "$data_dir/request.manifest"
+
+archive=
+simulate=
+
+while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
+ case "$n" in
+ archive) archive="$v" ;;
+ simulate) simulate="$v" ;;
+ esac
+done
+
+manifest_parser_finish
+
+if [ -z "$archive" ]; then
+ error "archive manifest value expected"
+fi
+
+if [ -n "$simulate" -a "$simulate" != "success" ]; then
+ exit_with_manifest 400 "unrecognized simulation outcome '$simulate'"
+fi
+
+m="$data_dir/package.manifest"
+extract_package_manifest "$data_dir/$archive" "$m"
+
+# Parse the package manifest and obtain the package name, version, and
+# project.
+#
+manifest_parser_start "$m"
+
+name=
+version=
+project=
+
+while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
+ case "$n" in
+ name) name="$v" ;;
+ version) version="$v" ;;
+ project) project="$v" ;;
+ esac
+done
+
+manifest_parser_finish
+
+if [ -z "$name" ]; then
+ error "name manifest value expected"
+fi
+
+if [ -z "$version" ]; then
+ error "version manifest value expected"
+fi
+
+if [ -z "$project" ]; then
+ project="$name"
+fi
+
+if [ -n "$result_url" ]; then
+ message_suffix=": $result_url/$name/$version"
+else
+ message_suffix=": $name/$version"
+fi
+
+revision="$(version_revision "$version")"
+
+# Open the reading file descriptor and lock the repository. Fail if unable to
+# lock before timeout.
+#
+l="$repo.lock"
+run touch "$l"
+trace "+ exec {lfd}<$l"
+exec {lfd}<"$l"
+
+# Note that on the locking failure we don't suggest the user to try again,
+# since the client program may suggest to re-try later for all server errors
+# (as bdep-publish(1) does).
+#
+if ! run flock -w "$rep_lock_timeout" "$lfd"; then
+ exit_with_manifest 503 "submission service is busy"
+fi
+
+repo_old="$(realpath "$repo")" # Old repo path.
+repo_name="$(basename "$repo")-$(date "+%Y%m%d-%H%M%S-%N")" # New repo name.
+repo_new="$(dirname "$repo_old")/$repo_name" # New repo path.
+repo_link="$repo_new.link" # New repo symlink.
+
+# On exit, remove the new repository symlink and directory, unless the link
+# doesn't exist or the directory removal is canceled (for example, the new
+# repository is made current).
+#
+function exit_trap ()
+{
+ if [ -L "$repo_link" ]; then
+ run rm -r -f "$repo_link"
+ fi
+
+ if [ -n "$repo_new" -a -d "$repo_new" ]; then
+ run rm -r -f "$repo_new"
+ fi
+}
+
+trap exit_trap EXIT
+
+# Check for the package duplicate (in all projects).
+#
+# Use <name>-<version>.* without .tar.gz in case we want to support more
+# archive types later.
+#
+run pkg_find_archive "$name-$version.*" "$repo_old/1" | readarray -t p
+
+if [ "${#p[@]}" -ne 0 ]; then
+ n="${p[1]}"
+ v="${p[2]}"
+
+ trace "found: $n/$v in ${p[0]}"
+
+ if [ "$n" == "$name" ]; then
+ exit_with_manifest 422 "duplicate submission"
+ else
+ exit_with_manifest 422 "submission conflicts with $n/$v"
+ fi
+fi
+
+# Copy the current repository using hardlinks.
+#
+# -r (recursive)
+# -t (preserve timestamps)
+# -O (omit dir timestamps)
+# --link-dest (hardlink files instead of copying)
+#
+# We also exclude the packages.manifest file that will be re-generated anyway.
+#
+run rsync -rtO --exclude 'packages.manifest' --link-dest="$repo_old" \
+ "$repo_old/" "$repo_new"
+
+# Remove the package version revision archives that may exist in the
+# repository.
+#
+# But first check if the repository contains newer revision of this package
+# version. Respond with the 'newer revision is present' result manifest and
+# exit if that's the case.
+#
+run pkg_find_archives "$name" "$version*" "$repo_new/1" | readarray -t arcs
+
+for f in "${arcs[@]}"; do
+ pkg_verify_archive "$f" | readarray -t p
+
+ v="${p[1]}"
+ rv="$(version_revision "$v")"
+
+ if [ "$rv" -gt "$revision" ]; then
+ exit_with_manifest 422 "newer revision $name/$v is present"
+ fi
+done
+
+for f in "${arcs[@]}"; do
+ run rm "$f"
+done
+
+# Copy the archive rather than moving it since we may need it for
+# troubleshooting. Note: the data and repository directories can be on
+# different filesystems and so hardlinking could fail.
+#
+run mkdir -p "$repo_new/1/$project"
+run cp "$data_dir/$archive" "$repo_new/1/$project"
+
+# Create the new repository.
+#
+# Note that if bpkg-rep-create fails, we can't reliably distinguish if this is
+# a user or internal error (broken package archive vs broken repository).
+# Thus, we always treat is as a user error, providing the full error
+# description in the response and assuming that the submitter can either fix
+# the issue or report it to the repository maintainers. This again assumes
+# private/trusted environment.
+#
+trace "+ bpkg rep-create '$repo_new/1' 2>&1"
+
+if ! e="$(bpkg rep-create "$repo_new/1" 2>&1)"; then
+ exit_with_manifest 400 "submitted archive is not a valid package
+$e"
+fi
+
+# If requested, verify that the new repository is loadable into the package
+# database and, as in the above case, treat the potential error as a user
+# error.
+#
+if [ "${#loader_args[@]}" -ne 0 ]; then
+ f="$data_dir/loadtab"
+ echo "http://testrepo/1 private cache:$repo_new/1" >"$f"
+
+ trace "+ ${loader_args[@]} '$f' 2>&1"
+
+ if ! e="$("${loader_args[@]}" "$f" 2>&1)"; then
+
+ # Sanitize the error message, removing the confusing lines.
+ #
+ e="$(run sed -re '/testrepo/d' <<<"$e")"
+ exit_with_manifest 400 "unable to add package to repository
+$e"
+ fi
+fi
+
+# Finally, create the new repository symlink and replace the current symlink
+# with it, unless we are simulating.
+#
+run ln -sf "$repo_name" "$repo_link"
+
+if [ -z "$simulate" ]; then
+ run mv -T "$repo_link" "$repo" # Switch the repository symlink atomically.
+
+ # Now, when the repository link is switched, disable the new repository
+ # removal.
+ #
+ # Note that we still can respond with an error status. However, the
+ # remaining operations are all cleanups and thus unlikely to fail.
+ #
+ repo_new=
+fi
+
+trace "+ exec {lfd}<&-"
+exec {lfd}<&- # Close the file descriptor and unlock the repository.
+
+# Remove the old repository, unless we are simulating.
+#
+# Note that if simulating, we leave the new repository directory/symlink
+# removal to the exit trap (see above).
+#
+if [ -z "$simulate" ]; then
+ run rm -r "$repo_old"
+
+ what="published"
+else
+ what="simulated"
+fi
+
+run rm -r "$data_dir"
+
+trace "package is $what$message_suffix"
+exit_with_manifest 200 "package is published$message_suffix"
diff --git a/brep/handler/submit/submit.bash.in b/brep/handler/submit/submit.bash.in
index d1d0634..7826809 100644
--- a/brep/handler/submit/submit.bash.in
+++ b/brep/handler/submit/submit.bash.in
@@ -1,5 +1,4 @@
# file : brep/handler/submit/submit.bash.in
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Utility functions useful for implementing package submission handlers.
@@ -48,12 +47,29 @@ function extract_package_manifest () # <archive> <manifest>
local arc="$1"
local man="$2"
- # Pass the --deep option to make sure that the *-file manifest values are
- # resolvable, so rep-create will not fail due to this package down the road.
- # Note that we also make sure that all the manifest values are known (see
- # bpkg-pkg-verify for details).
+ # Pass the --deep option to make sure that the bootstrap buildfile is
+ # present and the *-file manifest values are resolvable, so rep-create will
+ # not fail due to this package down the road. Note that we also make sure
+ # that all the manifest values are known (see bpkg-pkg-verify for details).
#
- if ! run_silent bpkg pkg-verify --deep --manifest "$arc" >"$man"; then
+ local cmd=(bpkg pkg-verify --deep --manifest "$arc")
+ trace_cmd "${cmd[@]}"
+
+ # Note that we used to just advise the user to run bpkg-pkg-verify locally
+ # for the details on the potential failure. That, however, may not always be
+ # helpful since the user can use a different version of the toolchain and so
+ # may observe a different behavior. Thus, we add the bpkg-pkg-verify error
+ # message to the response, turning it into an info. This way the user may
+ # potentially see the following bdep-publish diagnostics:
+ #
+ # error: package archive is not valid
+ # info: unable to satisfy constraint (build2 >= 0.17.0-) for package libhello-1.0.0.tar.gz
+ # info: available build2 version is 0.16.0
+ # info: run bpkg pkg-verify for details
+ # info: reference: 308e155764c8
+ #
+ local e
+ if ! e="$("${cmd[@]}" 2>&1 >"$man")"; then
# Perform the sanity check to make sure that bpkg is runnable.
#
@@ -61,6 +77,33 @@ function extract_package_manifest () # <archive> <manifest>
error "unable to run bpkg"
fi
- exit_with_manifest 400 "archive is not a valid package (run bpkg pkg-verify for details)"
+ # Note that bpkg-pkg-verify diagnostics may potentially contain the
+ # archive absolute path. Let's sanitize this diagnostics by stripping the
+ # archive directory path, if present. Also note that to use sed for that
+ # we first need to escape the special regex characters and slashes in the
+ # archive directory path (see sed's basic regular expressions for
+ # details).
+ #
+ local d="$(sed 's/[[\.*^$/]/\\&/g' <<<"$(dirname "$arc")/")"
+
+ e="$(sed -e "s/$d//g" -e 's/^error:/ info:/' <<<"$e")"
+ e=$'package archive is not valid\n'"$e"$'\n info: run bpkg pkg-verify for details'
+
+ exit_with_manifest 400 "$e"
fi
}
+
+# Extract the revision part from the package version. Return 0 if the version
+# doesn't contain revision.
+#
+function version_revision () # version
+{
+ local r
+ r="$(sed -n -re 's%^(\+?[^+]+)(\+([0-9]+))?$%\3%p' <<<"$1")"
+
+ if [ -z "$r" ]; then
+ r="0"
+ fi
+
+ echo "$r"
+}
diff --git a/brep/handler/upload/.gitignore b/brep/handler/upload/.gitignore
new file mode 100644
index 0000000..da4dc5a
--- /dev/null
+++ b/brep/handler/upload/.gitignore
@@ -0,0 +1,2 @@
+brep-upload-bindist
+brep-upload-bindist-clean
diff --git a/brep/handler/upload/buildfile b/brep/handler/upload/buildfile
new file mode 100644
index 0000000..ca52ddd
--- /dev/null
+++ b/brep/handler/upload/buildfile
@@ -0,0 +1,13 @@
+# file : brep/handler/upload/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: exe{brep-upload-bindist} exe{brep-upload-bindist-clean}
+
+include ../
+
+exe{brep-upload-bindist}: in{upload-bindist} bash{upload} ../bash{handler}
+
+[rule_hint=bash] \
+exe{brep-upload-bindist-clean}: in{upload-bindist-clean}
+
+bash{upload}: in{upload} ../bash{handler}
diff --git a/brep/handler/upload/upload-bindist-clean.in b/brep/handler/upload/upload-bindist-clean.in
new file mode 100644
index 0000000..99914a7
--- /dev/null
+++ b/brep/handler/upload/upload-bindist-clean.in
@@ -0,0 +1,224 @@
+#!/usr/bin/env bash
+
+# file : brep/handler/upload/upload-bindist-clean.in
+# license : MIT; see accompanying LICENSE file
+
+# Remove expired package configuration directories created by the
+# upload-bindist handler.
+#
+# Specifically, perform the following steps:
+#
+# - Recursively scan the specified root directory and collect the package
+# configuration directories with age older than the specified timeout (in
+# minutes). Recognize the package configuration directories by matching the
+# *-????-??-??T??:??:??Z* pattern and calculate their age based on the
+# modification time of the packages.sha256 file they may contain. If
+# packages.sha256 doesn't exist in the configuration directory, then
+# consider it as still being prepared and skip.
+#
+# - Iterate over the expired package configuration directories and for each of
+# them:
+#
+# - Lock the root directory.
+#
+# - Re-check the expiration criteria.
+#
+# - Remove the package configuration symlink if it refers to this directory.
+#
+# - Remove this directory.
+#
+# - Remove all the the parent directories of this directory which become
+# empty, up to (but excluding) the root directory.
+#
+# - Unlock the root directory.
+#
+usage="usage: $0 <root> <timeout>"
+
+# Diagnostics.
+#
+verbose= #true
+
+# The root directory lock timeout (in seconds).
+#
+lock_timeout=60
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions and subshells.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+function trace () { if [ "$verbose" ]; then info "$*"; fi }
+
+# Trace a command line, quoting empty arguments as well as those that contain
+# spaces.
+#
+function trace_cmd () # <cmd> <arg>...
+{
+ if [[ "$verbose" ]]; then
+ local s="+"
+ while [ $# -gt 0 ]; do
+ if [ -z "$1" -o -z "${1##* *}" ]; then
+ s="$s '$1'"
+ else
+ s="$s $1"
+ fi
+
+ shift
+ done
+
+ info "$s"
+ fi
+}
+
+# Trace and run a command.
+#
+function run () # <cmd> <arg>...
+{
+ trace_cmd "$@"
+ "$@"
+}
+
+if [[ "$#" -ne 2 ]]; then
+ error "$usage"
+fi
+
+# Package configurations root directory.
+#
+root_dir="${1%/}"
+shift
+
+if [[ -z "$root_dir" ]]; then
+ error "$usage"
+fi
+
+if [[ ! -d "$root_dir" ]]; then
+ error "'$root_dir' does not exist or is not a directory"
+fi
+
+# Package configuration directories timeout.
+#
+timeout="$1"
+shift
+
+if [[ ! "$timeout" =~ ^[0-9]+$ ]]; then
+ error "$usage"
+fi
+
+# Note that while the '%s' date format is not POSIX, it is supported on both
+# Linux and FreeBSD.
+#
+expiration=$(($(date -u +"%s") - $timeout * 60))
+
+# Collect the list of expired package configuration directories.
+#
+expired_dirs=()
+
+run find "$root_dir" -type d -name "*-????-??-??T??:??:??Z*" | while read d; do
+ f="$d/packages.sha256"
+
+ # Note that while the -r date option is not POSIX, it is supported on both
+ # Linux and FreeBSD.
+ #
+ trace_cmd date -u -r "$f" +"%s"
+ if t="$(date -u -r "$f" +"%s" 2>/dev/null)" && (($t <= $expiration)); then
+ expired_dirs+=("$d")
+ fi
+done
+
+if [[ "${#expired_dirs[@]}" -eq 0 ]]; then
+ exit 0 # Nothing to do.
+fi
+
+# Make sure the root directory lock file exists.
+#
+lock="$root_dir/upload.lock"
+run touch "$lock"
+
+# Remove the expired package configuration directories, symlinks which refer
+# to them, and the parent directories which become empty.
+#
+for d in "${expired_dirs[@]}"; do
+ # Deduce the path of the potential package configuration symlink that may
+ # refer to this package configuration directory by stripping the
+ # -<timestamp>[-<number>] suffix.
+ #
+ l="$(sed -n -re 's/^(.+)-[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z(-[0-9]+)?$/\1/p' <<<"$d")"
+ if [[ -z "$l" ]]; then
+ error "invalid name '$d' for package configuration directory"
+ fi
+
+ f="$d/packages.sha256"
+
+ # Open the reading file descriptor and lock the root directory. Fail if
+ # unable to lock before timeout.
+ #
+ trace "+ exec {lfd}<$lock"
+ exec {lfd}<"$lock"
+
+ if ! run flock -w "$lock_timeout" "$lfd"; then
+ error "unable to lock root directory"
+ fi
+
+ # Now, as the lock is acquired, recheck the package configuration directory
+ # expiration criteria (see above) and, if it still holds, remove this
+ # directory, the package configuration symlink if it refers to it, and all
+ # the parent directories which become empty up to (but excluding) the root
+ # directory.
+ #
+ trace_cmd date -u -r "$f" +"%s"
+ if t="$(date -u -r "$f" +"%s" 2>/dev/null)" && (($t <= $expiration)); then
+ # Remove the package configuration symlink.
+ #
+ # Do this first to avoid dangling symlinks which may potentially be
+ # exposed by brep.
+ #
+ # Note that while the realpath utility is not POSIX, it is present on
+ # both Linux and FreeBSD.
+ #
+ if [[ -L "$l" ]]; then
+ p="$(realpath "$l")"
+ if [[ "$p" == "$d" ]]; then
+ run rm "$l"
+ fi
+ fi
+
+ # Remove the package configuration directory.
+ #
+ # Note that this directory contains files copied from a subdirectory of
+ # upload-data. These files are normally owned by the Apache2 user/group
+ # and have rw-r--r-- permissions. This script is normally executed as the
+ # brep user/group and thus the uploads root directory and all its
+ # subdirectories must have read, write, and execute permissions granted to
+ # the brep user, for example, by using ACL (see INSTALL file for
+ # details). Since cp preserves the file permissions by default, these
+ # files effective permissions will normally be r-- (read-only) for this
+ # script. In this case rm pops up the 'remove write-protected regular
+ # file' prompt by default prior to removing these files. To suppress the
+ # prompt we will pass the -f option to rm.
+ #
+ run rm -rf "$d"
+
+ # Remove the empty parent directories.
+ #
+ # Note that we iterate until the rmdir command fails, presumably because a
+ # directory is not empty.
+ #
+ d="$(dirname "$d")"
+ while [[ "$d" != "$root_dir" ]]; do
+ trace_cmd rmdir "$d"
+ if rmdir "$d" 2>/dev/null; then
+ d="$(dirname "$d")"
+ else
+ break
+ fi
+ done
+ fi
+
+ # Close the file descriptor and unlock the root directory.
+ #
+ trace "+ exec {lfd}<&-"
+ exec {lfd}<&-
+done
diff --git a/brep/handler/upload/upload-bindist.in b/brep/handler/upload/upload-bindist.in
new file mode 100644
index 0000000..05d0bcf
--- /dev/null
+++ b/brep/handler/upload/upload-bindist.in
@@ -0,0 +1,595 @@
+#!/usr/bin/env bash
+
+# file : brep/handler/upload/upload-bindist.in
+# license : MIT; see accompanying LICENSE file
+
+# Binary distribution packages upload handler which places the uploaded
+# packages under the following filesystem hierarchy:
+#
+# <root>/[<tenant>/]<instance>/<os-release-name-id><os-release-version-id>/<project>/<package>/<version>/<package-config>
+#
+# The overall idea behind this handler is to create a uniquely named package
+# configuration directory for each upload and maintain the package
+# configuration symlink at the above path to refer to the directory of the
+# latest upload.
+#
+# The root directory is passed as an argument (via upload-handler-argument).
+# All the remaining directory components are retrieved from the respective
+# manifest values of request.manifest created by brep and
+# bindist-result.manifest contained in the uploaded archive.
+#
+# Note that the leaf component of the package configuration symlink path is
+# sanitized, having the "bindist", <instance>, <os-release-name-id>, and
+# <os-release-name-id><os-release-version-id> dash-separated sub-components
+# removed. If the component becomes empty as a result of the sanitization,
+# then the target CPU is assumed, if the package is not architecture-
+# independent, and "noarch" otherwise. If the sanitized component is not
+# empty, the package is not architecture-independent, and the resulting
+# component doesn't containt the target CPU, then prepend it with the <cpu>-
+# prefix. For example, the following symlink paths:
+#
+# .../archive/windows10/foo/libfoo/1.0.0/bindist-archive-windows10-release
+# .../archive/windows10/foo/libfoo/1.0.0/bindist-archive-windows10
+#
+# are reduced to:
+#
+# .../archive/windows10/foo/libfoo/1.0.0/x86_64-release
+# .../archive/windows10/foo/libfoo/1.0.0/x86_64
+#
+# To achieve this the handler performs the following steps (<dir> is passed as
+# last argument by brep and is a subdirectory of upload-data):
+#
+# - Parse <dir>/request.manifest to retrieve the upload archive path,
+# timestamp, and the values which are required to compose the package
+# configuration symlink path.
+#
+# - Extract files from the upload archive.
+#
+# - Parse <dir>/<instance>/bindist-result.manifest to retrieve the values
+# required to compose the package configuration symlink path and the package
+# file paths.
+#
+# - Compose the package configuration symlink path.
+#
+# - Compose the package configuration directory path by appending the
+# -<timestamp>[-<number>] suffix to the package configuration symlink path.
+#
+# - Create the package configuration directory.
+#
+# - Copy the uploaded package files into the package configuration directory.
+#
+# - Generate the packages.sha256 file in the package configuration directory,
+# which lists the SHA256 checksums of the files contained in this directory.
+#
+# - Switch the package configuration symlink to refer to the newly created
+# package configuration directory.
+#
+# - If the --keep-previous option is not specified, then remove the previous
+# target of the package configuration symlink, if exists.
+#
+# Notes:
+#
+# - There could be a race both with upload-bindist-clean and other
+# upload-bindist instances while creating the package version/configuration
+# directories, querying the package configuration symlink target, switching
+# the symlink, and removing the symlink's previous target. To avoid it, the
+# root directory needs to be locked for the duration of these operations.
+# This, however, needs to be done granularly to perform the time consuming
+# operations (files copying, etc) while not holding the lock.
+#
+# - The brep module doesn't acquire the root directory lock. Thus, the package
+# configuration symlink during its lifetime should always refer to a
+# valid/complete package configuration directory.
+#
+# - Filesystem entries that exist or are created in the data directory:
+#
+# <archive> saved by brep
+# request.manifest created by brep
+# <instance>/* extracted by the handler (bindist-result.manifest, etc)
+# result.manifest saved by brep
+#
+# Options:
+#
+# --keep-previous
+#
+# Don't remove the previous target of the package configuration symlink.
+#
+usage="usage: $0 [<options>] <root> <dir>"
+
+# Diagnostics.
+#
+verbose= #true
+
+# The root directory lock timeout (in seconds).
+#
+lock_timeout=60
+
+# If the package configuration directory already exists (may happen due to the
+# low timestamp resolution), then re-try creating the configuration directory
+# by adding the -<number> suffix and incrementing it until the creation
+# succeeds or the retries limit is reached.
+#
+create_dir_retries=99
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions and subshells.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
+
+@import brep/handler/handler@
+@import brep/handler/upload/upload@
+
+# Parse the command line options.
+#
+keep_previous=
+
+while [[ "$#" -gt 0 ]]; do
+ case $1 in
+ --keep-previous)
+ shift
+ keep_previous=true
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+if [[ "$#" -ne 2 ]]; then
+ error "$usage"
+fi
+
+# Destination root directory.
+#
+root_dir="${1%/}"
+shift
+
+if [[ -z "$root_dir" ]]; then
+ error "$usage"
+fi
+
+if [[ ! -d "$root_dir" ]]; then
+ error "'$root_dir' does not exist or is not a directory"
+fi
+
+# Upload data directory.
+#
+data_dir="${1%/}"
+shift
+
+if [[ -z "$data_dir" ]]; then
+ error "$usage"
+fi
+
+if [[ ! -d "$data_dir" ]]; then
+ error "'$data_dir' does not exist or is not a directory"
+fi
+
+reference="$(basename "$data_dir")" # Upload request reference.
+
+# Parse the upload request manifest.
+#
+manifest_parser_start "$data_dir/request.manifest"
+
+archive=
+instance=
+timestamp=
+name=
+version=
+project=
+package_config=
+target=
+tenant=
+
+while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
+ case "$n" in
+ archive) archive="$v" ;;
+ instance) instance="$v" ;;
+ timestamp) timestamp="$v" ;;
+ name) name="$v" ;;
+ version) version="$v" ;;
+ project) project="$v" ;;
+ package-config) package_config="$v" ;;
+ target) target="$v" ;;
+ tenant) tenant="$v" ;;
+ esac
+done
+
+manifest_parser_finish
+
+if [[ -z "$archive" ]]; then
+ error "archive manifest value expected"
+fi
+
+if [[ -z "$instance" ]]; then
+ error "instance manifest value expected"
+fi
+
+if [[ -z "$timestamp" ]]; then
+ error "timestamp manifest value expected"
+fi
+
+if [[ -z "$name" ]]; then
+ error "name manifest value expected"
+fi
+
+if [[ -z "$version" ]]; then
+ error "version manifest value expected"
+fi
+
+if [[ -z "$project" ]]; then
+ error "project manifest value expected"
+fi
+
+if [[ -z "$package_config" ]]; then
+ error "package-config manifest value expected"
+fi
+
+if [[ -z "$target" ]]; then
+ error "target manifest value expected"
+fi
+
+# Let's disallow the leading dot in the package-config manifest value since
+# the latter serves as the package configuration symlink name and brep skips
+# symlinks with the leading dots assuming them as hidden (see
+# mod/mod-package-version-details.cxx for details).
+#
+if [[ "$package_config" == "."* ]]; then
+ exit_with_manifest 400 "package-config manifest value may not start with dot"
+fi
+
+# Extract the CPU component from the target triplet and deduce the binary
+# distribution-specific CPU representation which is normally used in the
+# package file names.
+#
+cpu="$(sed -n -re 's/^([^-]+)-.+/\1/p' <<<"$target")"
+
+if [[ -z "$cpu" ]]; then
+ error "CPU expected in target triplet '$target'"
+fi
+
+# Use CPU extracted from the target triplet as a distribution-specific
+# representation, unless this is Debian or Fedora (see bpkg's
+# system-package-manager-{fedora,debian}.cxx for details).
+#
+cpu_dist="$cpu"
+
+case $instance in
+ debian)
+ case $cpu in
+ x86_64) cpu_dist="amd64" ;;
+ aarch64) cpu_dist="arm64" ;;
+ i386 | i486 | i586 | i686) cpu_dist="i386" ;;
+ esac
+ ;;
+ fedora)
+ case $cpu in
+ i386 | i486 | i586 | i686) cpu_dist="i686" ;;
+ esac
+ ;;
+esac
+
+# Unpack the archive.
+#
+run tar -xf "$data_dir/$archive" -C "$data_dir"
+
+# Parse the bindist result manifest list.
+#
+f="$data_dir/$instance/bindist-result.manifest"
+
+if [[ ! -f "$f" ]]; then
+ exit_with_manifest 400 "$instance/bindist-result.manifest not found"
+fi
+
+manifest_parser_start "$f"
+
+# Parse the distribution manifest.
+#
+# Note that we need to skip the first manifest version value and parse until
+# the next one is encountered, which introduces the first package file
+# manifest.
+#
+os_release_name_id=
+os_release_version_id=
+
+first=true
+more=
+while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
+ case "$n" in
+ "") if [[ "$first" ]]; then # Start of the first (distribution) manifest?
+ first=
+ else # Start of the second (package file) manifest.
+ more=true
+ break
+ fi
+ ;;
+
+ os-release-name-id) os_release_name_id="$v" ;;
+ os-release-version-id) os_release_version_id="$v" ;;
+ esac
+done
+
+if [[ -z "$os_release_name_id" ]]; then
+ exit_with_manifest 400 "os-release-name-id bindist result manifest value expected"
+fi
+
+if [[ -z "$os_release_version_id" ]]; then
+ exit_with_manifest 400 "os-release-version-id bindist result manifest value expected"
+fi
+
+if [[ ! "$more" ]]; then
+ exit_with_manifest 400 "no package file manifests in bindist result manifest list"
+fi
+
+# Parse the package file manifest list and cache the file paths.
+#
+# While at it, detect if the package is architecture-specific or not by
+# checking if any package file names contain the distribution-specific CPU
+# representation (as a sub-string).
+#
+# Note that while we currently only need the package file paths, we can make
+# use of their types and system names in the future. Thus, let's verify that
+# all the required package file values are present and, while at it, cache
+# them all in the parallel arrays.
+#
+package_file_paths=()
+package_file_types=()
+package_file_system_names=()
+
+arch_specific=
+
+# The outer loop iterates over package file manifests while the inner loop
+# iterates over manifest values in each such manifest.
+#
+while [[ "$more" ]]; do
+ more=
+ type=
+ path=
+ system_name=
+
+ while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do
+ case "$n" in
+ "") # Start of the next package file manifest.
+ more=true
+ break
+ ;;
+
+ package-file-path) path="$v" ;;
+ package-file-type) type="$v" ;;
+ package-file-system-name) system_name="$v" ;;
+ esac
+ done
+
+ if [[ -z "$path" ]]; then
+ exit_with_manifest 400 "package-file-path bindist result manifest value expected"
+ fi
+
+ if [[ -z "$type" ]]; then
+ exit_with_manifest 400 "package-file-type bindist result manifest value expected"
+ fi
+
+ package_file_paths+=("$path")
+ package_file_types+=("$type")
+ package_file_system_names+=("$system_name") # Note: system name can be empty.
+
+ if [[ "$path" == *"$cpu_dist"* ]]; then
+ arch_specific=true
+ fi
+done
+
+manifest_parser_finish
+
+# Sanitize the package configuration name.
+#
+config=
+for c in $(sed 's/-/ /g' <<<"$package_config"); do
+ if [[ "$c" != "bindist" &&
+ "$c" != "$instance" &&
+ "$c" != "$os_release_name_id" &&
+ "$c" != "$os_release_name_id$os_release_version_id" ]]; then
+ if [[ -z "$config" ]]; then
+ config="$c"
+ else
+ config="$config-$c"
+ fi
+ fi
+done
+
+# Reflect the architecture in the sanitized configuration name.
+#
+if [[ -z "$config" ]]; then
+ if [[ "$arch_specific" ]]; then
+ config="$cpu"
+ else
+ config="noarch"
+ fi
+else
+ if [[ "$arch_specific" && ("$config" != *"$cpu"*) ]]; then
+ config="$cpu-$config"
+ fi
+fi
+
+# Compose the package configuration symlink path.
+#
+config_link="$root_dir"
+
+if [[ -n "$tenant" ]]; then
+ config_link="$config_link/$tenant"
+fi
+
+config_link="$config_link/$instance/$os_release_name_id$os_release_version_id"
+config_link="$config_link/$project/$name/$version/$config"
+
+# Compose the package configuration directory path.
+#
+config_dir="$config_link-$timestamp"
+
+# Create the package configuration directory.
+#
+# Note that it is highly unlikely that multiple uploads for the same package
+# configuration/distribution occur at the same time (with the seconds
+# resolution) making the directory name not unique. If that still happens,
+# let's retry for some reasonable number of times to create the directory,
+# while adding the -<number> suffix to its path on each iteration. If
+# that also fails, then we assume that there is some issue with the handler
+# setup and fail, printing the cached mkdir diagnostics to stderr.
+#
+# Note that we need to prevent removal of the potentially empty package
+# version directory by the upload-bindist-clean script before we create the
+# configuration directory. To achieve that, we lock the root directory for the
+# duration of the package version/configuration directories creation.
+#
+# Open the reading file descriptor and lock the root directory. Fail if
+# unable to lock before timeout.
+#
+lock="$root_dir/upload.lock"
+run touch "$lock"
+trace "+ exec {lfd}<$lock"
+exec {lfd}<"$lock"
+
+if ! run flock -w "$lock_timeout" "$lfd"; then
+ exit_with_manifest 503 "upload service is busy"
+fi
+
+# Create parent (doesn't fail if directory exists).
+#
+config_parent_dir="$(dirname "$config_dir")"
+run mkdir -p "$config_parent_dir"
+
+created=
+
+trace_cmd mkdir "$config_dir"
+if ! e="$(mkdir "$config_dir" 2>&1)"; then # Note: fails if directory exists.
+ for ((i=0; i != $create_dir_retries; ++i)); do
+ d="$config_dir-$i"
+ trace_cmd mkdir "$d"
+ if e="$(mkdir "$d" 2>&1)"; then
+ config_dir="$d"
+ created=true
+ break
+ fi
+ done
+else
+ created=true
+fi
+
+# Close the file descriptor and unlock the root directory.
+#
+trace "+ exec {lfd}<&-"
+exec {lfd}<&-
+
+if [[ ! "$created" ]]; then
+ echo "$e" 1>&2
+ error "unable to create package configuration directory"
+fi
+
+# On exit, remove the newly created package configuration directory, unless
+# its removal is canceled (for example, the symlink is switched to refer to
+# it). Also remove the new symlink, if already created.
+#
+# Make sure we don't fail if the entries are already removed, for example, by
+# the upload-bindist-clean script.
+#
+config_link_new=
+function exit_trap ()
+{
+ if [[ -n "$config_dir" && -d "$config_dir" ]]; then
+ if [[ -n "$config_link_new" && -L "$config_link_new" ]]; then
+ run rm -f "$config_link_new"
+ fi
+ run rm -rf "$config_dir"
+ fi
+}
+
+trap exit_trap EXIT
+
+# Copy all the extracted package files to the package configuration directory.
+#
+for ((i=0; i != "${#package_file_paths[@]}"; ++i)); do
+ run cp "$data_dir/$instance/${package_file_paths[$i]}" "$config_dir"
+done
+
+# Generate the packages.sha256 file.
+#
+# Note that since we don't hold the root directory lock at this time, we
+# temporarily "hide" the resulting file from the upload-bindist-clean script
+# (which uses it for the upload age calculation) by adding the leading dot to
+# its name. Not doing so, we may end up with upload-bindist-clean
+# removing the half-cooked directory and so breaking the upload handling.
+#
+trace "+ (cd $config_dir && exec sha256sum -b ${package_file_paths[@]} >.packages.sha256)"
+(cd "$config_dir" && exec sha256sum -b "${package_file_paths[@]}" >".packages.sha256")
+
+# Create the new package configuration "hidden" symlink. Construct its name by
+# prepending the configuration directory name with a dot.
+#
+config_dir_name="$(basename "$config_dir")"
+config_link_new="$config_parent_dir/.$config_dir_name"
+run ln -s "$config_dir_name" "$config_link_new"
+
+# Switch the package configuration symlink atomically. But first, cache the
+# previous package configuration symlink target if the --keep-previous option
+# is not specified and "unhide" the packages.sha256 file.
+#
+# Note that to avoid a race with upload-bindist-clean and other upload-bindist
+# instances, we need to perform all the mentioned operations as well as
+# removing the previous package configuration directory while holding the root
+# directory lock.
+
+# Lock the root directory.
+#
+trace "+ exec {lfd}<$lock"
+exec {lfd}<"$lock"
+
+if ! run flock -w "$lock_timeout" "$lfd"; then
+ exit_with_manifest 503 "upload service is busy"
+fi
+
+# Note that while the realpath utility is not POSIX, it is present on both
+# Linux and FreeBSD.
+#
+config_dir_prev=
+if [[ ! "$keep_previous" && -L "$config_link" ]]; then
+ config_dir_prev="$(realpath "$config_link")"
+fi
+
+# "Unhide" the packages.sha256 file.
+#
+run mv "$config_dir/.packages.sha256" "$config_dir/packages.sha256"
+
+# Note that since brep doesn't acquire the root directory lock, we need to
+# switch the symlink as the final step, when the package directory is fully
+# prepared and can be exposed.
+#
+# @@ Also note that the -T option is Linux-specific. To add support for
+# FreeBSD we need to use -h option there (but maybe -T also works,
+# who knows).
+#
+run mv -T "$config_link_new" "$config_link"
+
+# Now, when the package configuration symlink is switched, disable removal of
+# the newly created package configuration directory.
+#
+# Note that we still can respond with an error status. However, the remaining
+# operations are all cleanups and thus unlikely to fail.
+#
+config_dir=
+
+# Remove the previous package configuration directory, if requested.
+#
+if [[ -n "$config_dir_prev" ]]; then
+ run rm -r "$config_dir_prev"
+fi
+
+# Unlock the root directory.
+#
+trace "+ exec {lfd}<&-"
+exec {lfd}<&-
+
+# Remove the no longer needed upload data directory.
+#
+run rm -r "$data_dir"
+
+trace "binary distribution packages are published"
+exit_with_manifest 200 "binary distribution packages are published"
diff --git a/brep/handler/upload/upload.bash.in b/brep/handler/upload/upload.bash.in
new file mode 100644
index 0000000..9acead9
--- /dev/null
+++ b/brep/handler/upload/upload.bash.in
@@ -0,0 +1,40 @@
+# file : brep/handler/upload/upload.bash.in
+# license : MIT; see accompanying LICENSE file
+
+# Utility functions useful for implementing upload handlers.
+
+if [ "$brep_handler_upload" ]; then
+ return 0
+else
+ brep_handler_upload=true
+fi
+
+@import brep/handler/handler@
+
+# Serialize the upload result manifest to stdout and exit the (sub-)shell with
+# the zero status.
+#
+reference= # Should be assigned later by the handler, when it becomes available.
+
+function exit_with_manifest () # <status> <message>
+{
+ trace_func "$@"
+
+ local sts="$1"
+ local msg="$2"
+
+ manifest_serializer_start
+
+ manifest_serialize "" "1" # Start of manifest.
+ manifest_serialize "status" "$sts"
+ manifest_serialize "message" "$msg"
+
+ if [ -n "$reference" ]; then
+ manifest_serialize "reference" "$reference"
+ elif [ "$sts" == "200" ]; then
+ error "no reference for code $sts"
+ fi
+
+ manifest_serializer_finish
+ run exit 0
+}
diff --git a/build/bootstrap.build b/build/bootstrap.build
index 3941a63..07bdd4f 100644
--- a/build/bootstrap.build
+++ b/build/bootstrap.build
@@ -1,5 +1,4 @@
# file : build/bootstrap.build
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
project = brep
diff --git a/build/export.build b/build/export.build
index 60bdb11..2293a0e 100644
--- a/build/export.build
+++ b/build/export.build
@@ -1,5 +1,4 @@
# file : build/export.build
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
$out_root/
diff --git a/build/root.build b/build/root.build
index ce8ca41..3dbc0cf 100644
--- a/build/root.build
+++ b/build/root.build
@@ -1,7 +1,10 @@
# file : build/root.build
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
+config [bool] config.brep.develop ?= false
+
+develop = $config.brep.develop
+
cxx.std = latest
using cxx
@@ -11,12 +14,22 @@ ixx{*}: extension = ixx
txx{*}: extension = txx
cxx{*}: extension = cxx
-cxx.poptions =+ "-I$out_root" "-I$src_root"
-
# Disable "unknown pragma" warnings.
#
cxx.coptions += -Wno-unknown-pragmas
+if ($cxx.id == 'gcc')
+{
+ cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object # libbutl
+
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
+cxx.poptions =+ "-I$out_root" "-I$src_root"
+
# Load the cli module but only if it's available. This way a distribution
# that includes pre-generated files can be built without installing cli.
# This is also the reason why we need to explicitly spell out individual
@@ -51,3 +64,27 @@ tests/{libue libul}{*}: bin.whole = false
# Specify the test target for cross-testing.
#
test.target = $cxx.target
+
+# Omit the rest during the skeleton load.
+#
+if ($build.mode != 'skeleton')
+{
+ # Unless we are in the develop mode, detect the Apache2 headers location
+ # automatically and add the respective preprocessor option.
+ #
+ if! $develop
+ {
+ apache2_includedir = [dir_path] $process.run(apxs -q 'INCLUDEDIR')
+
+ config [config.report] apache2_includedir
+
+ cxx.poptions += "-I$apache2_includedir"
+ }
+
+ # Extract the copyright notice from the LICENSE file.
+ #
+ copyright = $process.run_regex( \
+ cat $src_root/LICENSE, \
+ 'Copyright \(c\) (.+) \(see the AUTHORS and LEGAL files\)\.', \
+ '\1')
+}
diff --git a/buildfile b/buildfile
index 5a9ff2e..17ba8ab 100644
--- a/buildfile
+++ b/buildfile
@@ -1,9 +1,8 @@
# file : buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
-./: {*/ -build/ -web/} \
- doc{LICENSE NEWS README INSTALL* CONTRIBUTING.md} \
+./: {*/ -build/ -web/} \
+ doc{NEWS README INSTALL*} legal{LICENSE AUTHORS LEGAL} \
manifest
# Don't install tests or the INSTALL* files.
diff --git a/clean/buildfile b/clean/buildfile
index a183ff5..b91b1a0 100644
--- a/clean/buildfile
+++ b/clean/buildfile
@@ -1,5 +1,4 @@
# file : clean/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
import libs = libodb%lib{odb}
@@ -8,9 +7,14 @@ import libs += libbutl%lib{butl}
import libs += libbbot%lib{bbot}
include ../libbrep/
+include ../mod/
exe{brep-clean}: {hxx ixx cxx}{* -clean-options} {hxx ixx cxx}{clean-options} \
- ../libbrep/lib{brep} $libs
+ ../mod/libue{mod} ../libbrep/lib{brep} $libs
+
+# Build options.
+#
+obj{clean}: cxx.poptions += -DBREP_COPYRIGHT=\"$copyright\"
# Generated options parser.
#
diff --git a/clean/clean.cli b/clean/clean.cli
index 434b32f..d3be4d6 100644
--- a/clean/clean.cli
+++ b/clean/clean.cli
@@ -1,5 +1,4 @@
// file : clean/clean.cli
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
include <vector>;
@@ -128,8 +127,8 @@ Fatal error.|
\li|\cb{2}
-An instance of \cb{brep-clean} or \l{brep-migrate(1)} is already running. Try
-again.|
+An instance of \cb{brep-clean} or some other \cb{brep} utility is already
+running. Try again.|
\li|\cb{3}
diff --git a/clean/clean.cxx b/clean/clean.cxx
index a48308b..828ae4b 100644
--- a/clean/clean.cxx
+++ b/clean/clean.cxx
@@ -1,5 +1,4 @@
// file : clean/clean.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <map>
@@ -13,9 +12,7 @@
#include <odb/pgsql/database.hxx>
-#include <libbutl/pager.mxx>
-
-#include <libbbot/build-config.hxx>
+#include <libbutl/pager.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
@@ -25,10 +22,11 @@
#include <libbrep/build-package-odb.hxx>
#include <libbrep/database-lock.hxx>
+#include <mod/build-target-config.hxx>
+
#include <clean/clean-options.hxx>
using namespace std;
-using namespace bbot;
using namespace odb::core;
namespace brep
@@ -62,7 +60,7 @@ namespace brep
<< "libbbot " << LIBBBOT_VERSION_ID << endl
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
- << "Copyright (c) 2014-2019 Code Synthesis Ltd" << endl
+ << "Copyright (c) " << BREP_COPYRIGHT << "." << endl
<< "This is free software released under the MIT license." << endl;
return 0;
@@ -112,8 +110,8 @@ namespace brep
ops.db_port (),
"options='-c default_transaction_isolation=serializable'");
- // Prevent several brep-clean/migrate instances from updating build
- // database simultaneously.
+ // Prevent several brep utility instances from updating the database
+ // simultaneously.
//
database_lock l (db);
@@ -206,12 +204,13 @@ namespace brep
return 1;
}
- set<string> configs;
+ // Load build target configurations.
+ //
+ build_target_configs configs;
try
{
- for (auto& c: parse_buildtab (cp))
- configs.emplace (move (c.name));
+ configs = bbot::parse_buildtab (cp);
}
catch (const io_error& e)
{
@@ -219,6 +218,13 @@ namespace brep
return 1;
}
+ // Note: contains shallow references to the configuration targets/names.
+ //
+ set<build_target_config_id> configs_set;
+
+ for (const build_target_config& c: configs)
+ configs_set.insert (build_target_config_id {c.target, c.name});
+
// Parse timestamps.
//
map<string, timestamp> timeouts; // Toolchain timeouts.
@@ -260,18 +266,26 @@ namespace brep
//
// Query package builds in chunks in order not to hold locks for too long.
// Sort the result by package version to minimize number of queries to the
- // package database.
+ // package database. Note that we still need to sort by configuration and
+ // toolchain to make sure that builds are sorted consistently across
+ // queries and we don't miss any of them.
//
using bld_query = query<build>;
using prep_bld_query = prepared_query<build>;
size_t offset (0);
bld_query bq ("ORDER BY" +
- bld_query::id.package.tenant + "," +
- bld_query::id.package.name +
+ bld_query::id.package.tenant + "," +
+ bld_query::id.package.name +
order_by_version_desc (bld_query::id.package.version,
- false) +
- "OFFSET" + bld_query::_ref (offset) + "LIMIT 100");
+ false) + "," +
+ bld_query::id.target + "," +
+ bld_query::id.target_config_name + "," +
+ bld_query::id.package_config_name + "," +
+ bld_query::id.toolchain_name +
+ order_by_version (bld_query::id.toolchain_version,
+ false /* first */) +
+ "OFFSET" + bld_query::_ref (offset) + "LIMIT 2000");
connection_ptr conn (db.connection ());
@@ -285,19 +299,19 @@ namespace brep
// be made once per tenant package name due to the builds query sorting
// criteria (see above).
//
- using pkg_query = query<buildable_package>;
- using prep_pkg_query = prepared_query<buildable_package>;
+ using pkg_query = query<build_package_version>;
+ using prep_pkg_query = prepared_query<build_package_version>;
string tnt;
package_name pkg_name;
set<version> package_versions;
- pkg_query pq (
- pkg_query::build_package::id.tenant == pkg_query::_ref (tnt) &&
- pkg_query::build_package::id.name == pkg_query::_ref (pkg_name));
+ pkg_query pq (pkg_query::buildable &&
+ pkg_query::id.tenant == pkg_query::_ref (tnt) &&
+ pkg_query::id.name == pkg_query::_ref (pkg_name));
prep_pkg_query pkg_prep_query (
- conn->prepare_query<buildable_package> ("package-query", pq));
+ conn->prepare_query<build_package_version> ("package-query", pq));
for (bool ne (true); ne; )
{
@@ -317,6 +331,17 @@ namespace brep
? i->second
: default_timeout);
+ // Note that we don't consider the case when both the configuration
+ // and the package still exist but the package now excludes the
+ // configuration (configuration is now of the legacy class instead
+ // of the default class, etc). Should we handle this case and
+ // re-implement in a way brep-monitor does it? Probably not since
+ // the described situation is not very common and storing some extra
+ // builds which sooner or later will be wiped out due to the timeout
+ // is harmless. The current implementation, however, is simpler and
+ // consumes less resources in runtime (doesn't load build package
+ // objects, etc).
+ //
bool cleanup (
// Check that the build is not stale.
//
@@ -327,7 +352,10 @@ namespace brep
// Note that we unable to detect configuration changes and rely on
// periodic rebuilds to take care of that.
//
- configs.find (b.configuration) == configs.end ());
+ configs_set.find (
+ build_target_config_id {b.target,
+ b.target_config_name}) ==
+ configs_set.end ());
// Check that the build package still exists.
//
@@ -344,7 +372,7 @@ namespace brep
}
cleanup = package_versions.find (b.package_version) ==
- package_versions.end ();
+ package_versions.end ();
}
if (cleanup)
@@ -452,8 +480,8 @@ namespace brep
auto tenant_ids (pq.execute ());
if ((ne = !tenant_ids.empty ()))
{
- // Cache tenant ids and erase packages, repositories, and tenants at
- // once.
+ // Cache tenant ids and erase packages, repositories, public keys, and
+ // tenants at once.
//
strings tids;
tids.reserve (tenant_ids.size ());
@@ -469,6 +497,9 @@ namespace brep
db.erase_query<repository> (
query<repository>::id.tenant.in_range (tids.begin (), tids.end ()));
+ db.erase_query<public_key> (
+ query<public_key>::id.tenant.in_range (tids.begin (), tids.end ()));
+
db.erase_query<tenant> (
query<tenant>::id.in_range (tids.begin (), tids.end ()));
}
diff --git a/doc/buildfile b/doc/buildfile
index 4b2305e..f0a9387 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -1,11 +1,11 @@
# file : doc/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
-cmds = \
-brep-clean \
-brep-load \
-brep-migrate
+cmds = \
+brep-clean \
+brep-load \
+brep-migrate \
+brep-monitor
./: {man1 xhtml}{$cmds} \
css{common pre-box man} \
diff --git a/doc/cli.sh b/doc/cli.sh
index 264b299..3c23f49 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,7 +1,6 @@
#! /usr/bin/env bash
-version=0.13.0-a.0.z
-date="$(date +"%B %Y")"
+version=0.17.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -9,6 +8,9 @@ set -o errtrace # Trap in functions.
function info () { echo "$*" 1>&2; }
function error () { info "$*"; exit 1; }
+date="$(date +"%B %Y")"
+copyright="$(sed -n -re 's%^Copyright \(c\) (.+) \(see the AUTHORS and LEGAL files\)\.$%\1%p' ../LICENSE)"
+
while [ $# -gt 0 ]; do
case $1 in
--clean)
@@ -36,18 +38,31 @@ function compile ()
shift
done
- cli -I .. -v project="brep" -v version="$version" -v date="$date" \
---include-base-last "${o[@]}" --generate-html --html-prologue-file \
-man-prologue.xhtml --html-epilogue-file man-epilogue.xhtml --html-suffix .xhtml \
+ cli -I .. \
+-v project="brep" \
+-v version="$version" \
+-v date="$date" \
+-v copyright="$copyright" \
+--include-base-last "${o[@]}" \
+--generate-html --html-suffix .xhtml \
+--html-prologue-file man-prologue.xhtml \
+--html-epilogue-file man-epilogue.xhtml \
--link-regex '%bpkg([-.].+)%../../bpkg/doc/bpkg$1%' \
--link-regex '%brep(#.+)?%build2-repository-interface-manual.xhtml$1%' \
../$n.cli
- cli -I .. -v project="brep" -v version="$version" -v date="$date" \
---include-base-last "${o[@]}" --generate-man --man-prologue-file \
-man-prologue.1 --man-epilogue-file man-epilogue.1 --man-suffix .1 \
+ cli -I .. \
+-v project="brep" \
+-v version="$version" \
+-v date="$date" \
+-v copyright="$copyright" \
+--include-base-last "${o[@]}" \
+--generate-man --man-suffix .1 \
+--man-prologue-file man-prologue.1 \
+--man-epilogue-file man-epilogue.1 \
--link-regex '%bpkg(#.+)?%$1%' \
--link-regex '%brep(#.+)?%$1%' \
+--link-regex '%bbot(#.+)?%$1%' \
../$n.cli
}
@@ -57,7 +72,7 @@ o="--output-prefix brep-"
#
#compile "brep" $o --output-prefix ""
-pages="clean/clean load/load migrate/migrate"
+pages="clean/clean load/load migrate/migrate monitor/monitor"
for p in $pages; do
compile $p $o
@@ -79,13 +94,16 @@ function xhtml_to_ps () # <from> <to> [<html2ps-options>]
cli -I .. \
-v version="$(echo "$version" | sed -e 's/^\([^.]*\.[^.]*\).*/\1/')" \
-v date="$date" \
+-v copyright="$copyright" \
--generate-html --html-suffix .xhtml \
--html-prologue-file doc-prologue.xhtml \
--html-epilogue-file doc-epilogue.xhtml \
--link-regex '%b([-.].+)%../../build2/doc/b$1%' \
--link-regex '%bpkg([-.].+)%../../bpkg/doc/bpkg$1%' \
--link-regex '%bpkg(#.+)?%../../bpkg/doc/build2-package-manager-manual.xhtml$1%' \
---output-prefix build2-repository-interface- manual.cli
+--link-regex '%bbot(#.+)?%../../bbot/doc/build2-build-bot-manual.xhtml$1%' \
+--output-prefix build2-repository-interface- \
+manual.cli
xhtml_to_ps build2-repository-interface-manual.xhtml build2-repository-interface-manual-a4.ps -f doc.html2ps:a4.html2ps
ps2pdf14 -sPAPERSIZE=a4 -dOptimize=true -dEmbedAllFonts=true build2-repository-interface-manual-a4.ps build2-repository-interface-manual-a4.pdf
diff --git a/doc/manual.cli b/doc/manual.cli
index 61ef1f8..2b96393 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -1,5 +1,4 @@
// file : doc/manual.cli
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
"\name=build2-repository-interface-manual"
@@ -16,7 +15,8 @@
This document describes \c{brep}, the \c{build2} package repository web
interface. For the command line interface of \c{brep} utilities refer to the
-\l{brep-load(1)}, \l{brep-clean(1)}, and \l{brep-migrate(1)} man pages.
+\l{brep-load(1)}, \l{brep-clean(1)}, \l{brep-migrate(1)}, and
+\l{brep-monitor(1)} man pages.
\h1#submit|Package Submission|
@@ -51,8 +51,8 @@ binary mode.|
\li|Verify other parameters are valid manifest name/value pairs.
-The value can only contain printable ASCII characters as well as tab
-(\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
+The value can only contain UTF-8 encoded Unicode graphic characters as well as
+tab (\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
\li|Check for a duplicate submission.
@@ -121,7 +121,6 @@ reference: <abbrev-checksum>
|
-
\li|Send the submission email.
If \c{submit-email} is configured, send an email to this address containing
@@ -228,8 +227,8 @@ upload.|
\li|Verify other parameters are valid manifest name/value pairs.
-The value can only contain printable ASCII characters as well as tab
-(\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
+The value can only contain UTF-8 encoded Unicode graphic characters as well as
+tab (\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
\li|Generate CI request id and create request directory.
@@ -306,6 +305,15 @@ Check violations that are explicitly mentioned above are always reported with
the CI result manifest. Other errors (for example, internal server errors)
might be reported with unformatted text, including HTML.
+If the CI request contains the \c{interactive} parameter, then the CI service
+provides the execution environment login information for each test and stops
+them at the specified breakpoint.
+
+Pre-defined breakpoint ids are \c{error} and \c{warning}. The breakpoint id is
+included into the CI request manifest and the CI service must at least handle
+\c{error} but may recognize additional ids (build phase/command identifiers,
+etc).
+
If the CI request contains the \c{simulate} parameter, then the CI service
simulates the specified outcome of the CI process without actually performing
any externally visible actions (e.g., testing the package, publishing the
@@ -328,16 +336,28 @@ corresponding to the custom request parameters.
id: <request-id>
repository: <url>
[package]: <name>[/<version>]
-timestamp: <date-time>
+[interactive]: <breakpoint>
[simulate]: <outcome>
+timestamp: <date-time>
[client-ip]: <string>
[user-agent]: <string>
+[service-id]: <string>
+[service-type]: <string>
+[service-data]: <string>
\
The \c{package} value can be repeated multiple times. The \c{timestamp} value
is in the ISO-8601 \c{<YYYY>-<MM>-<DD>T<hh>:<mm>:<ss>Z} form (always
UTC). Note also that \c{client-ip} can be IPv4 or IPv6.
+Note that some CI service implementations may serve as backends for
+third-party services. The latter may initiate CI tasks, providing all the
+required information via some custom protocol, and expect the CI service to
+notify it about the progress. In this case the third-party service type as
+well as optionally the third-party id and custom state data can be
+communicated to the underlying CI handler program via the respective
+\c{service-*} manifest values.
+
\h#ci-overrides-manifest|CI Overrides Manifest|
@@ -349,8 +369,30 @@ being applied. Currently, only the following value groups can be overridden:
\
build-email build-{warning,error}-email
builds build-{include,exclude}
+*-builds *-build-{include,exclude}
+*-build-config
\
+For the package configuration-specific build constraint overrides the
+corresponding configuration must exist in the package manifest. In contrast,
+the package configuration override (\cb{*-build-config}) adds a new
+configuration if it doesn't exist and updates the arguments of the existing
+configuration otherwise. In the former case, all the potential build
+constraint overrides for such a newly added configuration must follow the
+corresponding \cb{*-build-config} override.
+
+Note that the build constraints group values (both common and build package
+configuration-specific) are overridden hierarchically so that the
+\c{[\b{*-}]\b{build-}{\b{include},\b{exclude}\}} overrides don't affect the
+respective \c{[\b{*-}]\b{builds}} values.
+
+Note also that the common and build package configuration-specific build
+constraints group value overrides are mutually exclusive. If the common build
+constraints are overridden, then all the configuration-specific constraints
+are removed. Otherwise, if any configuration-specific constraints are
+overridden, then for the remaining configurations the build constraints are
+reset to \cb{builds:\ none}.
+
See \l{bpkg#manifest-package Package Manifest} for details on these values.
@@ -368,4 +410,182 @@ message: <string>
[reference]: <string>
\
+
+\h1#upload|Build Artifacts Upload|
+
+The build artifacts upload functionality allows uploading archives of files
+generated as a byproduct of the package builds. Such archives as well as
+additional, repository-specific information can optionally be uploaded by the
+automated build bots via the HTTP \c{POST} method using the
+\c{multipart/form-data} content type (see the \l{bbot \c{bbot} documentation}
+for details). The implementation in \c{brep} only handles uploading as well as
+basic actions and verification (build session resolution, agent
+authentication, checksum verification) expecting the rest of the upload logic
+to be handled by a separate entity according to the repository policy. Such an
+entity can be notified by \c{brep} about a new upload as an invocation of the
+\i{handler program} (as part of the HTTP request) and/or via email. It could
+also be a separate process that monitors the upload data directory.
+
+For each upload request \c{brep} performs the following steps.
+
+\ol|
+
+\li|Determine upload type.
+
+The upload type must be passed via the \c{upload} parameter in the query
+component of the request URL.|
+
+\li|Verify upload size limit.
+
+The upload form-data payload size must not exceed \c{upload-max-size} specific
+for this upload type.|
+
+\li|Verify the required \c{session}, \c{instance}, \c{archive}, and
+\c{sha256sum} parameters are present. If \c{brep} is configured to perform
+agent authentication, then verify that the \c{challenge} parameter is also
+present. See the \l{bbot#arch-result-req Result Request Manifest} for
+semantics of the \c{session} and \c{challenge} parameters.
+
+The \c{archive} parameter must be the build artifacts archive upload while
+\c{sha256sum} must be its 64 characters SHA256 checksum calculated in the
+binary mode.|
+
+\li|Verify other parameters are valid manifest name/value pairs.
+
+The value can only contain UTF-8 encoded Unicode graphic characters as well as
+tab (\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
+
+\li|Resolve the session.
+
+Resolve the \c{session} parameter value to the actual package build
+information.|
+
+\li|Authenticate the build bot agent.
+
+Use the \c{challenge} parameter value and the resolved package build
+information to authenticate the agent, if configured to do so.|
+
+\li|Generate upload request id and create request directory.
+
+For each upload request a unique id (UUID) is generated and a request
+subdirectory is created in the \c{upload-data} directory with this id as its
+name.|
+
+\li|Save the upload archive into the request directory and verify its
+checksum.
+
+The archive is saved using the submitted name, and its checksum is calculated
+and compared to the submitted checksum.|
+
+\li|Save the upload request manifest into the request directory.
+
+The upload request manifest is saved as \c{request.manifest} into the request
+subdirectory next to the archive.|
+
+\li|Invoke the upload handler program.
+
+If \c{upload-handler} is configured, invoke the handler program passing to it
+additional arguments specified with \c{upload-handler-argument} (if any)
+followed by the absolute path to the upload request directory.
+
+The handler program is expected to write the upload result manifest to
+\c{stdout} and terminate with the zero exit status. A non-zero exit status is
+treated as an internal error. The handler program's \c{stderr} is logged.
+
+Note that the handler program should report temporary server errors (service
+overload, network connectivity loss, etc.) via the upload result manifest
+status values in the [500-599] range (HTTP server error) rather than via a
+non-zero exit status.
+
+The handler program assumes ownership of the upload request directory and can
+move/remove it. If after the handler program terminates the request directory
+still exists, then it is handled by \c{brep} depending on the handler process
+exit status and the upload result manifest status value. If the process has
+terminated abnormally or with a non-zero exit status or the result manifest
+status is in the [500-599] range (HTTP server error), then the directory is
+saved for troubleshooting by appending the \c{.fail} extension to its name.
+Otherwise, if the status is in the [400-499] range (HTTP client error), then
+the directory is removed. If the directory is left in place by the handler or
+is saved for troubleshooting, then the upload result manifest is saved as
+\c{result.manifest} into this directory, next to the request manifest.
+
+If \c{upload-handler-timeout} is configured and the handler program does not
+exit in the allotted time, then it is killed and its termination is treated as
+abnormal.
+
+If the handler program is not specified, then the following upload result
+manifest is implied:
+
+\
+status: 200
+message: <upload-type> upload is queued
+reference: <request-id>
+\
+
+|
+
+\li|Send the upload email.
+
+If \c{upload-email} is configured, send an email to this address containing
+the upload request manifest and the upload result manifest.|
+
+\li|Respond to the client.
+
+Respond to the client with the upload result manifest and its \c{status} value
+as the HTTP status code.|
+
+|
+
+Check violations (max size, etc) that are explicitly mentioned above are
+always reported with the upload result manifest. Other errors (for example,
+internal server errors) might be reported with unformatted text, including
+HTML.
+
+
+\h#upload-request-manifest|Upload Request Manifest|
+
+The upload request manifest starts with the below values and in that order
+optionally followed by additional values in the unspecified order
+corresponding to the custom request parameters.
+
+\
+id: <request-id>
+session: <session-id>
+instance: <name>
+archive: <name>
+sha256sum: <sum>
+timestamp: <date-time>
+
+name: <name>
+version: <version>
+project: <name>
+target-config: <name>
+package-config: <name>
+target: <target-triplet>
+[tenant]: <tenant-id>
+toolchain-name: <name>
+toolchain-version: <standard-version>
+repository-name: <canonical-name>
+machine-name: <name>
+machine-summary: <text>
+\
+
+The \c{timestamp} value is in the ISO-8601
+\c{<YYYY>-<MM>-<DD>T<hh>:<mm>:<ss>Z} form (always UTC).
+
+
+\h#upload-result-manifest|Upload Result Manifest|
+
+The upload result manifest starts with the below values and in that order
+optionally followed by additional values if returned by the handler program.
+If the upload request is successful, then the \c{reference} value must be
+present and contain a string that can be used to identify this request (for
+example, the upload request id).
+
+\
+status: <http-code>
+message: <string>
+[reference]: <string>
+\
+
"
diff --git a/doc/style b/doc/style
-Subproject 7e75bb936cf5b2bd2fa3344e13b6c486c8ecc8a
+Subproject b72eb624d13b1628e27e9f6c0b3c80853e8e015
diff --git a/etc/brep-module.conf b/etc/brep-module.conf
index 458261e..d5a5e78 100644
--- a/etc/brep-module.conf
+++ b/etc/brep-module.conf
@@ -3,12 +3,24 @@
# brep-). See brep(1) for detailed description of each configuration option.
# Commented out options indicate their default values.
#
+# Besides being parsed by the brep module, this file may also be parsed by
+# brep utilities that are normally only interested in the subset of the
+# options. To simplify skipping of unrecognized options, this file must
+# always have an option name and its value on the same line.
+#
# Package search page title. It is placed inside XHTML5 <title> element.
#
# search-title Packages
+# Package search page description. If specified, it is displayed before the
+# search form on the first page only. The value is treated as an XHTML5
+# fragment.
+#
+# search-description ""
+
+
# Web page logo. It is displayed in the page header aligned to the left edge.
# The value is treated as an XHTML5 fragment.
#
@@ -107,6 +119,25 @@ menu About=?about
# build-bot-agent-keys
+# Regular expressions in the /<regex>/<replacement>/ form for transforming the
+# interactive build login information, for example, into the actual command
+# that can be used by the user. The regular expressions are matched against
+# the "<agent> <interactive-login>" string containing the respective task
+# request manifest values. The first matching expression is used for the
+# transformation. If no expression matches, then the task request is
+# considered invalid, unless no expressions are specified. Repeat this option
+# to specify multiple expressions.
+#
+# build-interactive-login
+
+
+# Order in which packages are considered for build. The valid values are
+# 'stable' and 'random'. If not specified, then 'stable' is assumed. Note that
+# interactive builds are always preferred.
+#
+#build-package-order stable
+
+
# Number of builds per page.
#
# build-page-entries 20
@@ -123,10 +154,66 @@ menu About=?about
# build-forced-rebuild-timeout 600
-# Time to wait before considering a package for a normal rebuild. Must be
-# specified in seconds. Default is 24 hours.
+# Time to wait before considering a package for a soft rebuild (only to be
+# performed if the build environment or any of the package dependencies have
+# changed). Must be specified in seconds. The special zero value disables soft
+# rebuilds. Default is 24 hours.
+#
+# build-soft-rebuild-timeout 86400
+
+
+# Alternative package soft rebuild timeout to use instead of the soft rebuild
+# timeout (see the build-soft-rebuild-timeout option for details) during the
+# specified time interval. Must be specified in seconds. Default is the time
+# interval length plus (build-soft-rebuild-timeout - 24h) if soft rebuild
+# timeout is greater than 24 hours (thus the rebuild is only triggered within
+# the last 24 hours of the build-soft-rebuild-timeout expiration).
#
-# build-normal-rebuild-timeout 86400
+# The alternative rebuild timeout can be used to "pull" the rebuild window to
+# the specified time of day, for example, to optimize load and/or power
+# consumption of the build infrastructure (off-work hours, solar, off-peak
+# electricity tariffs, etc). A shorter than the time interval rebuild timeout
+# can also be used to force continuous rebuilds, for example, to shake out
+# flaky tests. Note also that if the alternative rebuild timeout is greater
+# than the normal rebuild timeout, then this will result in slower rebuilds
+# during the alternative time interval. In this case, if the build
+# infrastructure is monitored for delayed package builds, then the alternative
+# rebuild timeout should only be made slightly greater than the normal timeout
+# (see brep-monitor(1) for details).
+#
+# The time interval boundaries must be specified as times of day (in the local
+# timezone) in the <hours>:<minutes> form. If the stop time is less than the
+# start time then the interval extends through midnight. The start and stop
+# times must both be either specified or absent. If unspecified, then no
+# alternative rebuild timeout will be used.
+#
+# build-alt-soft-rebuild-timeout
+# build-alt-soft-rebuild-start
+# build-alt-soft-rebuild-stop
+
+
+# Time to wait before considering a package for a hard rebuild (to be
+# performed unconditionally). Must be specified in seconds. The special zero
+# value disables hard rebuilds. Default is 7 days.
+#
+# build-hard-rebuild-timeout 604800
+
+
+# Alternative package hard rebuild timeout. The semantics is the same as for
+# the build-alt-soft-rebuild-* options but for the build-hard-rebuild-timeout
+# option.
+#
+# build-alt-hard-rebuild-timeout
+# build-alt-hard-rebuild-start
+# build-alt-hard-rebuild-stop
+
+
+# Time to wait before assuming the 'queued' notifications are delivered for
+# package CI requests submitted via third-party services (GitHub, etc). During
+# this time a package is not considered for a build. Must be specified in
+# seconds. Default is 30 seconds.
+#
+# build-queued-timeout 30
# The maximum size of the build task request manifest accepted. Note that the
@@ -150,6 +237,19 @@ menu About=?about
# build-result-request-max-size 10485760
+# Enable or disable package build notification emails in the <name>=<mode>
+# form. The valid <mode> values are 'none', 'latest', and 'all'. If 'all' is
+# specified for a toolchain name, then emails are sent according to the
+# build-*email package manifest values when all versions of a package are
+# built with this toolchain. If 'latest' is specified, then for this toolchain
+# name the emails are only sent for the latest version of a package. If 'none'
+# is specified, then no emails are sent for this toolchain name. By default
+# the 'latest' mode is assumed. Repeat this option to enable/disable emails
+# for multiple toolchains.
+#
+# build-toolchain-email <toolchain-name>=latest|none|all
+
+
# The build database connection configuration. By default, brep will try to
# connect to the local instance of PostgreSQL with the operating system-default
# mechanism (Unix-domain socket, etc) and use operating system (login) user
@@ -178,6 +278,25 @@ menu About=?about
# build-db-retry 10
+# The root directory where the uploaded binary distribution packages are
+# saved to under the following directory hierarchy:
+#
+# [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+#
+# The package configuration directory symlinks that match these paths are
+# mapped to web URLs based on the bindist-url value and displayed on the
+# package version details page. If this option is specified, then bindist-url
+# must be specified as well.
+#
+# bindist-root
+
+
+# The root URL of the directory specified with the bindist-root option. This
+# option must be specified if bindist-root is specified.
+#
+# bindist-url
+
+
# The openssl program to be used for crypto operations. You can also specify
# additional options that should be passed to the openssl program with
# openssl-option. If the openssl program is not explicitly specified, then brep
@@ -254,10 +373,9 @@ menu About=?about
# The handler program to be executed on package submission. The handler is
-# executed as part of the submission request and is passed additional
-# arguments that can be specified with submit-handler-argument followed by
-# the absolute path to the submission directory. Note that the program path
-# must be absolute.
+# executed as part of the HTTP request and is passed additional arguments that
+# can be specified with submit-handler-argument followed by the absolute path
+# to the submission directory. Note that the program path must be absolute.
#
# submit-handler
@@ -321,6 +439,66 @@ menu About=?about
# ci-handler-timeout
+# The directory to save upload data to for the specified upload type. If
+# unspecified, the build artifacts upload functionality will be disabled for
+# this type.
+#
+# Note that the directory path must be absolute and the directory itself must
+# exist and have read, write, and execute permissions granted to the user that
+# runs the web server.
+#
+# upload-data <type>=<dir>
+
+
+# The maximum size of the upload data accepted for the specified upload type.
+# Note that currently the entire upload request is read into memory. The
+# default is 10M.
+#
+# upload-max-size <type>=10485760
+
+
+# The build artifacts upload email. If specified, the upload request and
+# result manifests will be sent to this address.
+#
+# upload-email <type>=<email>
+
+
+# The handler program to be executed on build artifacts upload of the
+# specified type. The handler is executed as part of the HTTP request and is
+# passed additional arguments that can be specified with
+# upload-handler-argument followed by the absolute path to the upload
+# directory (upload-data). Note that the program path must be absolute.
+#
+# upload-handler <type>=<path>
+
+
+# Additional arguments to be passed to the upload handler program for the
+# specified upload type (see upload-handler for details). Repeat this option
+# to specify multiple arguments.
+#
+# upload-handler-argument <type>=<arg>
+
+
+# The upload handler program timeout in seconds for the specified upload type.
+# If specified and the handler does not exit in the allotted time, then it is
+# killed and its termination is treated as abnormal.
+#
+# upload-handler-timeout <type>=<seconds>
+
+
+# Disable upload of the specified type for the specified toolchain name.
+# Repeat this option to disable uploads for multiple toolchains.
+#
+# upload-toolchain-exclude <type>=<name>
+
+
+# Disable upload of the specified type for packages from the repository with
+# the specified canonical name. Repeat this option to disable uploads for
+# multiple repositories.
+#
+# upload-repository-exclude <type>=<name>
+
+
# The default view to display for the global repository root. The value is one
# of the supported services (packages, builds, submit, ci, etc). Default is
# packages.
diff --git a/etc/buildfile b/etc/buildfile
index 8518b2b..f3157f2 100644
--- a/etc/buildfile
+++ b/etc/buildfile
@@ -1,14 +1,30 @@
# file : etc/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
./: file{** -buildfile}
-# Install into the etc/ subdirectory of, say, /usr/share/
-# recreating subdirectories.
+# Install into the etc/ subdirectory of, say, /usr/share/ recreating
+# subdirectories.
#
*:
{
install = data/etc/
install.subdirs = true
}
+
+# Keep the executable permission for the installed script files.
+#
+private/
+{
+ file{vm-start}@./ \
+ file{vm-start-base}@./ \
+ file{vm-login}@./ \
+ file{vm-stop}@./: install.mode=755
+
+ install/
+ {
+ file{brep-install}@./ \
+ file{brep-startup}@./ \
+ file{vm-gen-service}@./ : install.mode=755
+ }
+}
diff --git a/etc/private/README b/etc/private/README
new file mode 100644
index 0000000..d9a702e
--- /dev/null
+++ b/etc/private/README
@@ -0,0 +1,335 @@
+This directory contains a virtual machine (VM) with a build2 repository web
+interface (brep) installed and configured for a private package repository.
+It also includes a number of scripts and configuration files for running
+this VM as a systemd service.
+
+A brep installation consists of a web server (Apache2), a database server
+(PostgreSQL), and a number of auxiliary processes (repository loader,
+submission handler, etc). While all this can be installed and configured
+manually (as described in brep/INSTALL), this VM has everything pre-installed
+and pre-configured which makes it possible to quickly get a private
+repository up and running.
+
+Note that the configuration offered by this VM is only suitable for a
+private/trusted environment, normally either for personal use (host-local)
+or for use within an organization's private network. Specifically:
+
+ - The repository is accessed via HTTP.
+
+ - The repository is not signed.
+
+ - Submitted packages are published directly and without ownership
+ authentication.
+
+ - The VM does not auto-update (since it does not assume the presence of an
+ Internet connection) and therefore may not have the latest security
+ patches.
+
+The below setup instructions are for host machines that run systemd-based
+Linux distributions. Note, however, that it should be possible to use other
+distributions or operating systems provided they are capable of running
+QEMU/KVM virtual machines. The following utilities are expected to be
+available on the host machine:
+
+ - systemd >= 229 (systemd --version)
+ - bash >= 4.3 (bash --version)
+ - qemu >= 2.5.0 (qemu-system-x86_64 --version)
+ - screen, socat (screen --version, socat -V)
+
+Consult your distribution's package manager if any of these utilities are
+missing.
+
+The host machine is also expected to have KVM virtualization support as
+well as at least 1G of RAM (2G recommended) and at least 5G of disk space
+(4G for VM image and the rest for package storage) that can be dedicated
+to the VM.
+
+Commands shown in this guide use several prompts with the following meaning:
+
+ # -- must be executed as root on the host machine
+  $ -- must be executed as user brep on the host machine
+ > -- can be executed for testing on any other machine with build2 installed
+
+
+1. Create the brep user and group
+---------------------------------
+
+In this setup, the VM image, scripts, etc., as well as the repository packages
+are all kept in the home directory of the special user brep. In particular,
+the packages are stored on the host machine (as opposed to inside the VM
+image) and are shared with the VM (using the virtio-9p filesystem). As a
+result, if necessary, you can manipulate the package repository from the host
+machine (but see Step 6 below for the rules). This setup also makes it easier
+to upgrade VM images by simply replacing the old image with a new one (see
+Step 7 below for details).
+
+However, to make this arrangement work reliably, the brep user/group IDs on
+the host machine must match those inside the VM. As a result, we create the
+brep user/group with specific IDs:
+
+# groupadd --gid 63700 brep
+# useradd --uid 63700 --gid 63700 --create-home brep
+# usermod --lock brep # disable password login (if desired)
+
+Additionally, if your distribution requires users that are allowed to use KVM
+to belong to a special group (normally kvm), then add the brep user to this
+group:
+
+# usermod -aG kvm brep
+
+If unsure whether this is required, skip this step and come back to it if you
+get the 'KVM: permission denied' error on Step 4.
+
+
+2. Download and unpack the VM archive into the brep user's home directory
+-------------------------------------------------------------------------
+
+# su - brep
+$ pwd
+/home/brep
+
+$ curl -fO https://download.build2.org/X.Y.Z/linux-debian-N-brep-X.Y.Z.tar.xz
+$ sha256sum -b linux-debian-N-brep-X.Y.Z.tar.xz
+
+Verify the checksum matches the one from https://build2.org/download.xhtml
+
+$ tar -xf linux-debian-N-brep-X.Y.Z.tar.xz --strip-components=1
+$ ls
+bin/ etc/ vm/ vm-brep@.service README NEWS
+
+
+3. Configure networking for the VM
+----------------------------------
+
+This setup expects the VM to use bridged networking with a persistent tap
+interface. This allows for a wide variety of configurations ranging between
+host-local (private bridge without routing), subnet (private bridge with
+routing/NAT), and local area network (public bridge over host's Ethernet
+adapter). In particular, the last configuration would make the repository
+accessible from other machines on the same local network.
+
+The exact steps on how to setup bridged networking and create a persistent tap
+interface depend on the network manager used, thus consult your distribution's
+documentation for details. The guide found in etc/systemd-networkd/README
+shows how to setup the local area network configuration mentioned above using
+the systemd-networkd network manager available on most systemd-based
+distributions.
+
+In the rest of this guide we assume that a tap interface called tap0 is
+appropriately configured and is owned by user/group brep.
+
+
+4. Generate a MAC address and start the VM for testing
+------------------------------------------------------
+
+The recommended way to obtain a MAC address for the VM is to generate it based
+on the address of the host's Ethernet adapter (see inside vm-gen-macaddress
+for details):
+
+$ ~/bin/vm-gen-macaddress xx:yy:yy:yy:yy:yy 0
+
+Where xx:yy:yy:yy:yy:yy is the MAC address of the host's Ethernet adapter
+which can be viewed with the following command:
+
+# ip link show
+
+The address printed by vm-gen-macaddress will be in the 02:yy:yy:yy:yy:yy
+form.
+
+If you are using a local network configuration, then now is a good time to
+assign the VM its IP address and hostname. If you need to submit a request to
+your network administrator, then the following text could serve as a template:
+
+"I would like to run a VM on the <host> machine that needs to have its own IP
+ address and domain name (configured via DHCP). It will have fixed MAC address
+ <mac> (which was derived from <host>'s physical Ethernet address; but you are
+ welcome to assign a different MAC address if required). The DHCP client ID is
+ the same as the MAC address. I would like this machine to have the <vm> name
+ if possible.
+
+ FYI, this is a QEMU/KVM virtual machine running as a systemd service. It
+ will use bridged networking with a tap interface."
+
+Where:
+
+ <host> host machine's name, for example, myserver.lan (run hostname -f)
+ <mac> the generated mac address (02:yy:yy:yy:yy:yy)
+ <vm> VM machine's name, for example, mybrep.lan
+
+Note that the VM is configured to receive its hostname from the DHCP server
+(the DHCP protocol option 12, "Host Name"). Failing that, the repository URL
+will use the IP address.
+
+Next, create the package repository directory and start the VM for testing
+(replace 02:yy:yy:yy:yy:yy with the actual MAC address):
+
+$ mkdir -p state/bpkg
+$ ~/bin/vm-start --stdio --tap tap0 --mac 02:yy:yy:yy:yy:yy vm/brep.img
+
+After booting, you will be presented with a login. Login as root with password
+123 (VM command prompts are shown indented with two spaces). Then verify IP
+address, hostname, and the network functionality:
+
+ # ip addr show
+ # hostname -f
+ # ping example.org
+
+If everything appears correct, visit the repository web page with a browser
+(for example, http://mybrep.lan). Check the About page to verify the
+repository URL matches the hostname or IP address.
+
+Try to submit a package (for example, from your development machine):
+
+> bdep new hello
+> cd hello
+> git add . && git commit -m test
+> bdep init -C @test cc
+> bdep publish --control=none --repository http://mybrep.lan --force=snapshot
+
+Visit the repository web page and confirm the package is there. Then try to
+consume the submitted package from the repository:
+
+> bpkg create -d test
+> bpkg build -d test hello@http://mybrep.lan/1
+
+If everything is working fine, shut the VM down:
+
+ # shutdown -h now
+
+
+5. Setup the VM to run as a systemd service
+-------------------------------------------
+
+To start the VM as a systemd service on host boot, perform the following
+steps.
+
+First, create the VM configuration file (replace 02:yy:yy:yy:yy:yy with the
+actual MAC address):
+
+$ cat <<EOF >vm/brep.conf
+RAM=2G
+CPU=1
+TAP=tap0
+MAC=02:yy:yy:yy:yy:yy
+EOF
+
+Then configure the systemd service:
+
+# cp ~brep/vm-brep@.service /etc/systemd/system/
+# chmod 644 /etc/systemd/system/vm-brep@.service
+# systemctl status vm-brep@brep
+# systemctl start vm-brep@brep
+# systemctl status vm-brep@brep
+
+If the VM fails to start, study the logs for a possible cause:
+
+# journalctl -u vm-brep@brep
+
+If the VM has started successfully, perform the same verifications as on Step
+4 above.
+
+To login to the VM running as a systemd service (for example, to verify IP and
+hostname) use the vm-login script (which uses screen(1) to connect to the VM's
+console):
+
+$ ~/bin/vm-login ~/brep-con.sock
+
+Note that the screen may be blank (due to this being a serial console). In
+this case, press Enter to see the login. To close the login, press 'Ctrl-a k'
+(or 'Ctrl-a a k' if already running inside screen).
+
+If everything functions correctly, verify the VM can be stopped:
+
+# systemctl stop vm-brep@brep
+# systemctl status vm-brep@brep
+
+Finally, if desired, enable the VM service to start on boot:
+
+# systemctl enable vm-brep@brep
+
+After this you may also want to reboot the host machine and confirm the VM is
+started on boot.
+
+
+6. Manage the repository state
+------------------------------
+
+While you can submit packages to the repository using bdep-publish(1), they
+can also be added manually. Also, currently, packages can only be removed
+manually.
+
+The repository packages and metadata are stored in the ~brep/state/bpkg/pkg/
+directory. If you need to make any modifications in this directory, there are
+two rules that you must follow:
+
+ 1. You must stop the VM before making any modifications.
+
+ 2. You must make any modification only as user brep.
+
+After performing the modifications, remove the 1/packages.manifest file to
+trigger the repository metadata regeneration on the next VM startup. You can
+also customize the repositories.manifest file in the same way. For example,
+you could add cppget.org as a prerequisite repository for your private
+repository.
+
+Putting it all together, the steps could look like this:
+
+# systemctl stop vm-brep@brep
+# su - brep
+$ cd state/bpkg/pkg/1
+$ <make your changes here>
+$ rm packages.manifest
+$ exit
+# systemctl start vm-brep@brep
+
+Note also that it's easy to break the repository with manual modifications.
+For example, you may add a package that has an unmet dependency or remove a
+package that still has some dependents. In this case, the brep service inside
+the VM will fail to start and the repository web interface will be
+unavailable. If this happens, you can log into the VM to investigate:
+
+$ ~/bin/vm-login ~/brep-con.sock
+
+ # systemctl status brep-startup
+ # journalctl -u brep-startup
+
+
+7. Upgrade the VM
+-----------------
+
+To upgrade to the new version of the VM, first download and unpack the new
+VM archive similar to Step 2:
+
+$ curl -fO https://download.build2.org/X.Y.Z/linux-debian-N-brep-X.Y.Z.tar.xz
+$ sha256sum -b linux-debian-N-brep-X.Y.Z.tar.xz
+
+Verify the checksum matches the one from https://build2.org/download.xhtml
+
+$ tar -xf linux-debian-N-brep-X.Y.Z.tar.xz
+
+Next read the linux-debian-N-brep-X.Y.Z/NEWS file for changes and potential
+backwards compatibility issues. Unless instructed otherwise by the NEWS file,
+the upgrade procedure is as follows:
+
+# systemctl stop vm-brep@brep
+
+$ cd
+$ mkdir bak
+$ mv -t bak/ bin etc vm vm-brep@.service README NEWS
+$ mv -t ./ linux-debian-N-brep-X.Y.Z/*
+$ cp bak/vm/brep.conf vm/
+$ rm state/bpkg/pkg/1/packages.manifest
+
+# cp ~brep/vm-brep@.service /etc/systemd/system/
+# chmod 644 /etc/systemd/system/vm-brep@.service
+# systemctl daemon-reload
+# systemctl start vm-brep@brep
+# systemctl status vm-brep@brep
+
+If the VM has started successfully, perform the same verifications as on Step
+4 above. If everything is functioning correctly, you can remove the backup
+files:
+
+$ rm -r bak
+
+If there are any issues, investigate as on Step 6.
diff --git a/etc/private/install/README b/etc/private/install/README
new file mode 100644
index 0000000..ef3ae70
--- /dev/null
+++ b/etc/private/install/README
@@ -0,0 +1,75 @@
+This directory contains scripts, configuration files, etc., that are used to
+prepare the virtual machine (VM) with a build2 repository web interface (brep)
+described in ../README. This document provides terse notes on how this VM is
+prepared.
+
+The VM is prepared and tested using user brep:
+
+# groupadd --gid 63700 brep
+# useradd --uid 63700 --gid 63700 --create-home brep
+# usermod --lock brep
+# usermod -aG kvm brep
+
+Note: different UID/GID can be used by passing the --brep-user option to the
+brep-install script.
+
+# su - brep
+$ mkdir -p bin vm state/bpkg
+
+Next copy the prepared VM image:
+
+$ cp .../linux_debian_10.img vm/brep.img
+
+The brep-install script assumes a Debian-based VM distribution. Other
+distributions can probably be made to work but will require changes to
+brep-install. The VM normally already has the following changes applied:
+
+ # apt-get install acpid # For QEMU system_powerdown to work.
+ # systemctl enable acpid
+
+ # systemctl enable serial-getty@ttyS0.service
+ # systemctl start serial-getty@ttyS0.service
+
+ # nano /etc/default/grub
+ # # GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"
+ # # GRUB_TERMINAL="serial console"
+ # update-grub
+
+ # echo localhost >/etc/hostname
+ # nano /etc/dhcp/dhclient.conf
+ # # Comment out `send host-name ...`.
+ # # Add `send dhcp-client-identifier = hardware;`
+
+Clone or copy the brep repository and create the directory structure:
+
+$ cp -r .../brep ./
+$ cp brep/etc/private/vm-* bin/
+$ cp -r brep/etc/private ./etc
+
+Download the build2 toolchain installation script:
+
+$ curl -sSfO https://download.build2.org/X.Y.Z/build2-install-X.Y.Z.sh
+$ # Verify the checksum.
+$ mv build2-install-*.sh etc/install/
+
+Start the VM (give as much CPU/RAM as available to speed up compilation):
+
+$ ~/bin/vm-start --install etc/install/ --cpu 8 --ram 8G \
+ --tap tap0 --mac de:ad:be:ef:de:ad vm/brep.img
+
+Login into the VM as root, then perform the following steps:
+
+ # mount -t 9p -o trans=virtio,version=9p2000.L install /mnt
+ # /mnt/brep-install --mount
+
+After the installation is complete, test the result as described in ../README.
+
+Note: to create a "clean" VM for distribution, pass the --clean option to
+brep-install, shut the VM down immediately after installation, save the clean
+VM image, then boot a copy for testing.
+
+Generate the systemd service template file:
+
+$ ~/etc/install/vm-gen-service --bin bin --etc vm --var vm --run .
+
+Test starting the VM as a systemd service as described in ../README.
diff --git a/etc/private/install/brep-apache2.conf b/etc/private/install/brep-apache2.conf
new file mode 100644
index 0000000..99186d1
--- /dev/null
+++ b/etc/private/install/brep-apache2.conf
@@ -0,0 +1,99 @@
+# Keep in the main server configuration context. This way the directive will
+# be in effect during module initialization and request handling.
+#
+# Note that initialization log messages are written to the main server log
+# file (/var/log/apache2/error.log), and request handling messages to the
+# virtual server log file (/var/www/brep/log/error.log).
+#
+LogLevel brep:info
+
+<VirtualHost *:80>
+ #ServerName <brep-hostname>
+ #ServerAdmin <brep-admin-email>
+
+ #DocumentRoot /var/www/brep/public
+ #Options +Indexes
+
+ AddOutputFilterByType DEFLATE application/xhtml+xml
+ AddOutputFilterByType DEFLATE text/manifest
+ AddOutputFilterByType DEFLATE text/plain
+ AddOutputFilterByType DEFLATE text/css
+
+ Alias "/1" "/var/brep/bpkg/pkg/1"
+
+ ErrorLog /var/www/brep/log/error.log
+ CustomLog /var/www/brep/log/access.log combined
+
+ # brep configuration
+ #
+
+ # Load the brep module.
+ #
+ <IfModule !brep_module>
+ LoadModule brep_module /home/brep/install/libexec/brep/mod_brep.so
+ </IfModule>
+
+  # Repository email. This email is used for the From: header in emails sent
+  # by brep (for example, build failure notifications).
+ #
+ #brep-email <brep-admin-email>
+
+ # Repository host. It specifies the scheme and the host address (but not the
+ # root path; see brep-root below) that will be used whenever brep needs to
+ # construct an absolute URL to one of its locations (for example, a link to
+  # a build log that is being sent via email).
+ #
+ #brep-host http://<brep-hostname>
+
+ # Repository root. This is the part of the URL between the host name and the
+ # start of the repository. For example, root value /pkg means the repository
+ # URL is http://example.org/pkg/. Specify / to use the web server root
+ # (e.g., http://example.org/). If using a different repository root, don't
+ # forget to also change Location and Alias directives below.
+ #
+ brep-root /
+
+ <Location "/">
+ SetHandler brep
+
+ <IfModule dir_module>
+ DirectoryIndex disabled
+ DirectorySlash Off
+ </IfModule>
+ </Location>
+
+ # Brep module configuration. If you prefer, you can paste the contents of
+ # this file here. However, you will need to prefix every option with
+ # 'brep-'.
+ #
+ brep-conf /home/brep/config/brep-module.conf
+
+ # Static brep content (CSS files).
+ #
+ <IfModule !alias_module>
+ Error "mod_alias is not enabled"
+ </IfModule>
+
+ # Note: trailing slashes are important!
+ #
+ Alias /@/ /home/brep/install/share/brep/www/
+
+ <Directory "/home/brep/install/share/brep/www">
+ Require all granted
+ </Directory>
+
+ # brep config override (must come after).
+ #
+ <LocationMatch "^/([0-9]|icons)(/.*)?$">
+ SetHandler none
+
+ DirectoryIndex enabled
+ DirectorySlash On
+ </LocationMatch>
+</VirtualHost>
+
+<Directory /var/brep/bpkg/pkg/>
+ Options Indexes FollowSymLinks
+ AllowOverride None
+ Require all granted
+</Directory>
diff --git a/etc/private/install/brep-install b/etc/private/install/brep-install
new file mode 100755
index 0000000..37179c2
--- /dev/null
+++ b/etc/private/install/brep-install
@@ -0,0 +1,479 @@
+#! /usr/bin/env bash
+
+# file : etc/private/install/brep-install
+# license : MIT; see accompanying LICENSE file
+
+# Setup HTTP-only brep instance with unsigned package submission support via
+# direct repository publishing (brep/handler/submit/submit-pub).
+#
+# NOTE: this setup should only be used in private/trusted environments.
+#
+# Unless the --setup option is specified, create the 'brep' group and user and
+# re-run itself with the --setup option under this user. In the setup mode
+# install and configure the brep instance, automating the instructions from
+# the INSTALL file, including:
+#
+# - Build the build2 toolchain (installing it to /usr/local/) and brep
+# (installing it to ~brep/install/).
+#
+# - Install PostgreSQL and create brep users/databases.
+#
+# - Install Apache2 and configure the HTTP server with the brep module.
+#
+# Note that the script is written for use on Debian-based distributions so you
+# will need to adjust it to match other distributions or operating systems.
+#
+# Options:
+#
+# --mount
+#
+# Mount the virtio-9p device with id 'state' as the /var/brep directory.
+# This directory is expected to either contain the pkg repository or be
+# empty, in which case an empty repository will be automatically
+# initialized. If this option is unspecified, the directory will be created
+# in the local filesystem.
+#
+# --brep-user
+#
+# User and group ids to use when creating the 'brep' group and user. If
+# unspecified, 63700 is used.
+#
+# --setup
+#
+# Install and configure the brep instance, assuming that the 'brep' user
+# already exists and this script is executed as this user.
+#
+# --clean
+#
+# At the end of the brep instance setup remove installation environment-
+# specific traces (host name/IP from the configuration files, etc). Normally
+# you would use this option to make the "clean" machine copy for
+# distribution. Note that if this option is specified, then the brep
+#   instance will only be usable after the machine reboot.
+#
+usage="Usage: $0 [<options>]"
+
+# build2 toolchain repository certificate fingerprint. Note: this is a
+# repository the toolchain installation script downloads the build2 packages
+# from.
+#
+toolchain_repo_cert_fp="70:64:FE:E4:E0:F3:60:F1:B4:51:E1:FA:12:5C:E0:B3:DB:DF:96:33:39:B9:2E:E5:C2:68:63:4C:A6:47:39:43"
+#toolchain_repo_cert_fp="EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14"
+
+# brep package repository URL and certificate fingerprint.
+#
+#brep_repo_url="https://pkg.cppget.org/1/alpha"
+#brep_repo_cert_fp="70:64:FE:E4:E0:F3:60:F1:B4:51:E1:FA:12:5C:E0:B3:DB:DF:96:33:39:B9:2E:E5:C2:68:63:4C:A6:47:39:43"
+brep_repo_url="https://stage.build2.org/1"
+brep_repo_cert_fp="EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14"
+
+owd=`pwd`
+trap "{ exit 1; }" ERR
+trap "{ cd $owd; }" EXIT
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "error: $*"; exit 1; }
+
+# Trace a command line, quoting empty arguments as well as those that contain
+# spaces.
+#
+function trace () # <cmd> <arg>...
+{
+ local s="+"
+ while [ "$#" -gt 0 ]; do
+ if [ -z "$1" -o -z "${1##* *}" ]; then
+ s="$s \"$1\""
+ else
+ s="$s $1"
+ fi
+
+ shift
+ done
+
+ info "$s"
+}
+
+# Trace and run a command.
+#
+run () # <args>...
+{
+ trace "$@"
+ "$@"
+}
+
+# The chosen fixed id for the 'brep' user. Note: must match the id of the
+# 'brep' user on the host.
+#
+# Note that Linux assigns the [0 99] range for the statically allocated system
+# users and [100 499] -- for dynamic allocations by administrators and post-
+# install scripts. Debian, in turn, assigns the [100 999] range for the
+# dynamically allocated system users and [60000 64999] -- for statically
+# allocated on demand "obscure package users".
+#
+brep_id=63700 # Update the README file on change.
+
+# Parse the command line options and, while at it, compose the options array
+# for potential re-execution as the 'brep' user.
+#
+mount=
+setup=
+clean=
+ops=()
+
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --mount)
+ mount=true
+ ops+=("$1")
+ shift
+ ;;
+ --brep-user)
+ shift
+ brep_id="$1"
+ shift
+ ;;
+ --setup)
+ setup=true
+ shift
+ ;;
+ --clean)
+ clean=true
+ ops+=("$1")
+ shift
+ ;;
+ *)
+ break # The end of options is encountered.
+ ;;
+ esac
+done
+
+if [ "$#" -ne 0 ]; then
+ error "$usage"
+fi
+
+scr_exe="$(realpath "${BASH_SOURCE[0]}")"
+scr_dir="$(dirname "$scr_exe")"
+
+# Unless we are in the setup mode, non-interactively add the 'brep'
+# user/group and re-execute the script in the setup mode as this user.
+#
+if [ ! "$setup" ]; then
+ run sudo addgroup --gid "$brep_id" brep
+
+ run sudo adduser --uid "$brep_id" --gid "$brep_id" --disabled-password \
+ --gecos "" brep
+
+ run sudo tee -a /etc/sudoers.d/brep >/dev/null <<EOF
+brep ALL=(ALL) NOPASSWD:ALL
+EOF
+
+ run sudo chmod 0440 /etc/sudoers.d/brep
+
+ # Use --session-command rather than --command|-c to make sure that when the
+ # su program receives SIGINT (Ctrl-C) it kills not just its child process
+ # but also all its descendants.
+ #
+ # Note: here we rely on ops to not contain spaces or be empty.
+ #
+ run exec sudo su -l brep --session-command "'$scr_exe' --setup ${ops[*]}"
+fi
+
+# Here we assume that we are executed as brep user.
+#
+run cd "$HOME"
+
+# Mount the brep state directory, if requested. Note that otherwise, the
+# directory will be created later, in the local filesystem by the brep-startup
+# script.
+#
+if [ "$mount" ]; then
+ run sudo mkdir -p /var/brep
+
+ run sudo tee -a /etc/fstab >/dev/null <<EOF
+state /var/brep 9p trans=virtio,version=9p2000.L,posixacl,cache=none,_netdev 0 0
+EOF
+
+ run sudo mount -a
+fi
+
+# Install the prerequisite binary packages.
+#
+run sudo apt-get --yes update
+run sudo apt-get --yes install --no-install-recommends g++
+run sudo apt-get --yes install --no-install-recommends postgresql postgresql-contrib libpq-dev
+run sudo apt-get --yes install --no-install-recommends apache2 libapr1-dev libapreq2-dev apache2-dev
+run sudo apt-get --yes install --no-install-recommends acl rsync
+run sudo apt-get clean
+
+# Install build2 toolchain.
+#
+run mkdir build2-build
+run cd build2-build
+
+# Look for the toolchain installation script in this script directory.
+#
+run cp "$(echo "$scr_dir"/build2-install-*.sh)" .
+run sh ./build2-install-*.sh --no-check --yes --trust "$toolchain_repo_cert_fp"
+#run sh ./build2-install-*.sh --no-check --yes --local
+
+run cd .. # Back to brep home.
+
+# Grant Apache2 read access to the module and configuration.
+#
+run setfacl -m g:www-data:rx "$HOME"
+run setfacl -dm g:www-data:rx "$HOME"
+
+# Install brep.
+#
+run mkdir brep
+run cd brep
+
+run bpkg create \
+ cc \
+ config.cc.coptions="-O3" \
+ config.cc.poptions="-I$(apxs -q includedir)" \
+ config.bin.lib=shared \
+ config.bin.rpath="$HOME/install/lib" \
+ config.install.root="$HOME/install"
+
+run bpkg add "$brep_repo_url"
+run bpkg fetch --trust "$brep_repo_cert_fp"
+run bpkg build --yes brep ?sys:libapr1 ?sys:libapreq2 ?sys:libpq
+run bpkg install brep
+
+run cd .. # Back to brep home.
+
+# Create PostgreSQL user and databases.
+#
+# Note that while we could probably omit the build-related setup, let's keep
+# it to stay close to the instructions in the INSTALL file and to simplify the
+# potential future configuration of the brep instance as a build2 build bot
+# controller.
+#
+run sudo sudo -u postgres psql <<EOF
+CREATE DATABASE brep_package
+TEMPLATE template0
+ENCODING 'UTF8'
+LC_COLLATE 'en_US.UTF8'
+LC_CTYPE 'en_US.UTF8';
+
+CREATE DATABASE brep_build
+TEMPLATE template0
+ENCODING 'UTF8'
+LC_COLLATE 'en_US.UTF8'
+LC_CTYPE 'en_US.UTF8';
+
+CREATE USER brep;
+
+GRANT ALL PRIVILEGES ON DATABASE brep_package, brep_build TO brep;
+
+CREATE USER "www-data" INHERIT IN ROLE brep;
+
+CREATE USER "brep-build" INHERIT IN ROLE brep PASSWORD '-';
+
+\c brep_package
+GRANT ALL PRIVILEGES ON SCHEMA public TO brep;
+
+\c brep_build
+GRANT ALL PRIVILEGES ON SCHEMA public TO brep;
+EOF
+
+# Create the "staging" package database for the submit-pub package submission
+# handler.
+#
+run sudo sudo -u postgres psql <<EOF
+CREATE DATABASE brep_submit_package
+TEMPLATE template0
+ENCODING 'UTF8'
+LC_COLLATE 'en_US.UTF8'
+LC_CTYPE 'en_US.UTF8';
+
+GRANT ALL PRIVILEGES ON DATABASE brep_submit_package TO brep;
+
+\c brep_submit_package
+GRANT ALL PRIVILEGES ON SCHEMA public TO brep;
+EOF
+
+# Make sure the 'brep' and Apache2 users' logins work properly.
+#
+q="SELECT current_database();"
+run psql -d brep_package -c "$q" >/dev/null
+run psql -d brep_build -c "$q" >/dev/null
+run psql -d brep_submit_package -c "$q" >/dev/null
+
+run sudo sudo -u www-data psql -d brep_package -c "$q" >/dev/null
+run sudo sudo -u www-data psql -d brep_build -c "$q" >/dev/null
+
+# Setup the connection between the databases.
+#
+run sudo sudo -u postgres psql -d brep_build <<EOF
+CREATE EXTENSION postgres_fdw;
+
+CREATE SERVER package_server
+FOREIGN DATA WRAPPER postgres_fdw
+OPTIONS (dbname 'brep_package', updatable 'true');
+
+GRANT USAGE ON FOREIGN SERVER package_server to brep;
+
+CREATE USER MAPPING FOR PUBLIC
+SERVER package_server
+OPTIONS (user 'brep-build', password '-');
+EOF
+
+# Allow brep-build user to access the brep_package database.
+#
+f="$(run sudo sudo -u postgres psql -t -A -c "show hba_file;")"
+s="# TYPE DATABASE USER ADDRESS METHOD\nlocal brep_package brep-build md5\n\n"
+
+run sudo sed --in-place=.bak "1s/^/$s/" "$f"
+run sudo systemctl restart postgresql
+
+# Enable creating database tables with columns of the case-insensitive
+# character string type.
+#
+q="CREATE EXTENSION citext;"
+run sudo sudo -u postgres psql -d brep_package <<<"$q"
+run sudo sudo -u postgres psql -d brep_build <<<"$q"
+run sudo sudo -u postgres psql -d brep_submit_package <<<"$q"
+
+# Copy the brep module configuration.
+#
+# Note: must be done before bin/brep-startup execution, which adjusts the
+# configuration.
+#
+run mkdir config
+run cp "$scr_dir/brep-module.conf" config/
+
+# Initialize the brep private instance, in particular creating the database
+# schemas and running the brep loader.
+#
+run mkdir bin/
+run cp "$scr_dir/brep-startup" bin/
+run bin/brep-startup
+
+# Smoke test the database schemas.
+#
+run psql -d brep_package -c 'SELECT canonical_name, summary FROM repository' >/dev/null
+run psql -d brep_build -c 'SELECT package_name FROM build' >/dev/null
+run psql -d brep_build -c 'SELECT DISTINCT name FROM build_package' >/dev/null
+run psql -d brep_submit_package -c 'SELECT canonical_name, summary FROM repository' >/dev/null
+
+# Setup executing the brep-startup script on boot.
+#
+run sudo cp "$scr_dir/brep-startup.service" /etc/systemd/system/
+
+run sudo systemctl start brep-startup.service # Make sure there are no issues.
+run sudo systemctl enable brep-startup.service
+
+# Prepare directories for the package submission service.
+#
+run mkdir submit-data
+run mkdir submit-temp
+run setfacl -m g:www-data:rwx submit-data
+run setfacl -m g:www-data:rwx submit-temp
+
+# Make the Apache2 user-owned directories fully accessible by the 'brep' user
+# (which the submit-pub submission handler will run as).
+#
+run setfacl -dm g:brep:rwx submit-data
+run setfacl -dm g:brep:rwx submit-temp
+
+# Add the Apache2 user to sudoers, so the submission handler can re-execute
+# itself as the 'brep' user.
+#
+run sudo tee -a /etc/sudoers.d/www-data >/dev/null <<EOF
+www-data ALL=(ALL) NOPASSWD:ALL
+EOF
+
+run sudo chmod 0440 /etc/sudoers.d/www-data
+
+# Setup the Apache2 module.
+#
+run sudo mkdir -p /var/www/brep/log/
+
+run sudo cp "$scr_dir/brep-apache2.conf" /etc/apache2/sites-available/000-brep.conf
+run sudo cp "$scr_dir/brep-logrotate" /etc/logrotate.d/brep
+
+run sudo a2dissite --purge -q 000-default
+run sudo a2ensite -q 000-brep
+
+run sudo systemctl restart apache2
+run sudo systemctl status apache2 >/dev/null
+
+# Make sure the Apache2 service depends on PostgreSQL and
+# brep-startup.service, so that they are started in proper order.
+#
+run sudo mkdir -p /etc/systemd/system/apache2.service.d/
+run sudo tee /etc/systemd/system/apache2.service.d/postgresql.conf >/dev/null <<EOF
+[Unit]
+Requires=postgresql.service
+After=postgresql.service
+EOF
+
+run sudo tee /etc/systemd/system/apache2.service.d/brep-startup.conf >/dev/null <<EOF
+[Unit]
+Requires=brep-startup.service
+After=brep-startup.service
+EOF
+
+run sudo mkdir -p /etc/systemd/system/postgresql.service.d/
+run sudo tee /etc/systemd/system/postgresql.service.d/apache2.conf >/dev/null <<EOF
+[Unit]
+Wants=apache2.service
+EOF
+
+run sudo systemctl daemon-reload
+
+# Verify that Apache2 is stopped after PostgreSQL is stopped.
+#
+run sudo systemctl stop postgresql
+
+ec="0"
+run sudo systemctl status apache2 >/dev/null || ec="$?"
+
+if [ "$ec" -ne 3 ]; then
+ error "exit code 3 (unit is not active) is expected instead of $ec"
+fi
+
+# Verify that Apache2 is started after PostgreSQL is started.
+#
+run sudo systemctl start postgresql
+
+run sleep 3
+run sudo systemctl status apache2 >/dev/null
+
+# Setup periodic loader execution.
+#
+run sudo cp "$scr_dir/brep-load.service" /etc/systemd/system/
+run sudo cp "$scr_dir/brep-load.timer" /etc/systemd/system/
+
+run sudo systemctl start brep-load.service # Make sure there are no issues.
+
+run sudo systemctl start brep-load.timer
+run sudo systemctl status brep-load.timer >/dev/null
+run sudo systemctl enable brep-load.timer
+run sudo systemctl status brep-load.timer >/dev/null
+
+# Cleanup the installation environment-specific traces, if requested.
+#
+if [ "$clean" ]; then
+
+ # Stop the relevant services.
+ #
+ run sudo systemctl stop brep-load.timer
+ run sudo systemctl stop apache2
+
+ # Remove the host name/IP from the configuration.
+ #
+ run cp "$scr_dir/brep-module.conf" config/ # Adjusted by brep-startup.
+ run rm config/loadtab # Recreated by brep-startup.
+
+ # Finally, stop networking and cleanup the DHCP lease information.
+ #
+ # Note that after networking is stopped, sudo prints the 'unable to resolve
+ # host' diagnostics while trying to obtain the host IP. Thus, we execute the
+ # last two commands via a single sudo call.
+ #
+ run sudo bash -c "systemctl stop networking && rm -rf /var/lib/dhcp/*.leases"
+fi
diff --git a/etc/private/install/brep-load.service b/etc/private/install/brep-load.service
new file mode 100644
index 0000000..272a124
--- /dev/null
+++ b/etc/private/install/brep-load.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=brep repository loader service
+
+[Service]
+Type=oneshot
+User=brep
+Group=brep
+ExecStart=/home/brep/install/bin/brep-load /home/brep/config/loadtab
+
+[Install]
+WantedBy=default.target
diff --git a/etc/private/install/brep-load.timer b/etc/private/install/brep-load.timer
new file mode 100644
index 0000000..1bf78c4
--- /dev/null
+++ b/etc/private/install/brep-load.timer
@@ -0,0 +1,33 @@
+[Unit]
+Description=brep repository loader timer
+RefuseManualStart=no
+RefuseManualStop=no
+
+# Note that due to brep-startup service's oneshot type, this unit won't be
+# started until the brep-startup process exits successfully.
+#
+# Also note that if brep-startup fails and is restarted manually, similar to
+# services, the timer is not started automatically. Instead, it has to be
+# started manually with `systemctl start brep-load.timer`.
+#
+Requires=brep-startup.service
+After=brep-startup.service
+
+[Timer]
+Unit=brep-load.service
+
+# Don't keep track of the timer across reboots.
+#
+Persistent=false
+
+# Start the timer for the first time.
+#
+OnBootSec=1
+
+# Then wait 4-5 seconds until the next run.
+#
+OnUnitInactiveSec=4
+AccuracySec=1
+
+[Install]
+WantedBy=timers.target
diff --git a/etc/private/install/brep-logrotate b/etc/private/install/brep-logrotate
new file mode 100644
index 0000000..67c7c90
--- /dev/null
+++ b/etc/private/install/brep-logrotate
@@ -0,0 +1,20 @@
+/var/www/brep/log/*.log {
+ weekly
+ missingok
+ rotate 4
+ compress
+ delaycompress
+ notifempty
+ create 640 root adm
+ sharedscripts
+ postrotate
+ if /etc/init.d/apache2 status > /dev/null ; then \
+ /etc/init.d/apache2 reload > /dev/null; \
+ fi;
+ endscript
+ prerotate
+ if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
+ run-parts /etc/logrotate.d/httpd-prerotate; \
+ fi; \
+ endscript
+}
diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf
new file mode 100644
index 0000000..bfaa8f6
--- /dev/null
+++ b/etc/private/install/brep-module.conf
@@ -0,0 +1,532 @@
+# Configuration file for the brep module (note: this is not an apache2 .conf
+# file but it can be converted to one by prefixing all the options with
+# brep-). See brep(1) for detailed description of each configuration option.
+# Commented out options indicate their default values.
+#
+# Besides being parsed by the brep module, this file may also be parsed by
+# brep utilities that are normally only interested in the subset of the
+# options. To simplify skipping of unrecognized options, this file must always
+# option name and its value on the same line.
+#
+
+# Package search page title. It is placed inside XHTML5 <title> element.
+#
+# search-title Packages
+
+
+# Package search page description. If specified, it is displayed before the
+# search form on the first page only. The value is treated as an XHTML5
+# fragment.
+#
+# search-description ""
+
+
+# Web page logo. It is displayed in the page header aligned to the left edge.
+# The value is treated as an XHTML5 fragment.
+#
+# logo ""
+
+
+# Web page menu. Each entry is displayed in the page header in the order
+# specified and aligned to the right edge. A link target that starts with '/'
+# or contains ':' is used as is. Otherwise, it is prefixed with the repository
+# web interface root.
+#
+menu Packages=
+# menu Builds=?builds
+# menu Configs=?build-configs
+menu Submit=?submit
+# menu CI=?ci
+menu About=?about
+
+
+# Number of packages per page.
+#
+# search-page-entries 20
+
+
+# Number of pages in navigation (pager).
+#
+# search-pages 5
+
+
+# Number of package description characters to display in brief pages.
+#
+# package-description 500
+
+
+# Number of package changes characters to display in brief pages.
+#
+# package-changes 5000
+
+
+# The package database connection configuration. By default, brep will try to
+# connect to the local instance of PostgreSQL with the operating system-
+# default mechanism (Unix-domain socket, etc) and use operating system
+# (login) user name and the database called 'brep_package'. If the role name
+# is not empty then the login user will be switched (with SET ROLE) to this
+# user prior to executing any statements. If not specified, then 'brep' is
+# used. See brep(1) for details.
+#
+# package-db-user
+# package-db-role brep
+# package-db-password
+# package-db-name brep_package
+# package-db-host
+# package-db-port
+
+
+# The maximum number of concurrent package database connections per web server
+# process. If 0, then no limitation is applied.
+#
+# package-db-max-connections 5
+
+
+# The maximum number of times to retry package database transactions in the
+# face of recoverable failures (deadlock, loss of connection, etc).
+#
+# package-db-retry 10
+
+
+# Build configuration file. If not specified (default), then the package
+# building functionality will be disabled. If specified, then the build
+# database must be configured (see next). Note: must be an absolute path.
+#
+# build-config
+
+
+# Number of build configurations per page.
+#
+# build-config-page-entries 20
+
+
+# Number of pages in navigation (pager).
+#
+# build-config-pages 5
+
+
+# Directory containing build bot agent public keys. If specified, then brep
+# will perform agent authentication and will reject build results from
+# unauthenticated ones. If not specified, then build results are accepted from
+# all agents (which will be a security risk if the brep instance is publicly
+# accessible).
+#
+# The directory is expected to contain one PEM-encoded public key per file with
+# the .pem extension. All other files and subdirectories are ignored. The brep
+# instance needs to be restarted after adding new key files for the changes to
+# take effect.
+#
+# build-bot-agent-keys
+
+
+# Regular expressions in the /<regex>/<replacement>/ form for transforming the
+# interactive build login information, for example, into the actual command
+# that can be used by the user. The regular expressions are matched against
+# the "<agent> <interactive-login>" string containing the respective task
+# request manifest values. The first matching expression is used for the
+# transformation. If no expression matches, then the task request is
+# considered invalid, unless no expressions are specified. Repeat this option
+# to specify multiple expressions.
+#
+# build-interactive-login
+
+
+# Order in which packages are considered for build. The valid values are
+# 'stable' and 'random'. If not specified, then 'stable' is assumed. Note that
+# interactive builds are always preferred.
+#
+#build-package-order stable
+
+
+# Number of builds per page.
+#
+# build-page-entries 20
+
+
+# Number of pages in navigation (pager).
+#
+# build-pages 5
+
+
+# Time to wait before considering a package for a forced rebuild. Must be
+# specified in seconds. Default is 10 minutes.
+#
+# build-forced-rebuild-timeout 600
+
+
+# Time to wait before considering a package for a soft rebuild (only to be
+# performed if the build environment or any of the package dependencies have
+# changed). Must be specified in seconds. The special zero value disables soft
+# rebuilds. Default is 24 hours.
+#
+# build-soft-rebuild-timeout 86400
+
+
+# Alternative package soft rebuild timeout to use instead of the soft rebuild
+# timeout (see the build-soft-rebuild-timeout option for details) during the
+# specified time interval. Must be specified in seconds. Default is the time
+# interval length plus (build-soft-rebuild-timeout - 24h) if soft rebuild
+# timeout is greater than 24 hours (thus the rebuild is only triggered within
+# the last 24 hours of the build-soft-rebuild-timeout expiration).
+#
+# The alternative rebuild timeout can be used to "pull" the rebuild window to
+# the specified time of day, for example, to optimize load and/or power
+# consumption of the build infrastructure (off-work hours, solar, off-peak
+# electricity tariffs, etc). A shorter than the time interval rebuild timeout
+# can also be used to force continuous rebuilds, for example, to shake out
+# flaky tests. Note also that if the alternative rebuild timeout is greater
+# than the normal rebuild timeout, then this will result in slower rebuilds
+# during the alternative time interval. In this case, if the build
+# infrastructure is monitored for delayed package builds, then the alternative
+# rebuild timeout should only be made slightly greater than the normal timeout
+# (see brep-monitor(1) for details).
+#
+# The time interval boundaries must be specified as times of day (in the local
+# timezone) in the <hours>:<minutes> form. If the stop time is less than the
+# start time then the interval extends through midnight. The start and stop
+# times must both be either specified or absent. If unspecified, then no
+# alternative rebuild timeout will be used.
+#
+# build-alt-soft-rebuild-timeout
+# build-alt-soft-rebuild-start
+# build-alt-soft-rebuild-stop
+
+
+# Time to wait before considering a package for a hard rebuild (to be
+# performed unconditionally). Must be specified in seconds. The special zero
+# value disables hard rebuilds. Default is 7 days.
+#
+# build-hard-rebuild-timeout 604800
+
+
+# Alternative package hard rebuild timeout. The semantics is the same as for
+# the build-alt-soft-rebuild-* options but for the build-hard-rebuild-timeout
+# option.
+#
+# build-alt-hard-rebuild-timeout
+# build-alt-hard-rebuild-start
+# build-alt-hard-rebuild-stop
+
+
+# Time to wait before assuming the 'queued' notifications are delivered for
+# package CI requests submitted via third-party services (GitHub, etc). During
+# this time a package is not considered for a build. Must be specified in
+# seconds. Default is 30 seconds.
+#
+# build-queued-timeout 30
+
+
+# The maximum size of the build task request manifest accepted. Note that the
+# HTTP POST request body is cached to retry database transactions in the face
+# of recoverable failures (deadlock, loss of connection, etc). Default is
+# 100K.
+#
+# build-task-request-max-size 102400
+
+
+# Time to wait before considering the expected task result lost. Must be
+# specified in seconds. Default is 3 hours.
+#
+# build-result-timeout 10800
+
+
+# The maximum size of the build result manifest accepted. Note that the HTTP
+# POST request body is cached to retry database transactions in the face of
+# recoverable failures (deadlock, loss of connection, etc). Default is 10M.
+#
+# build-result-request-max-size 10485760
+
+
+# Enable or disable package build notification emails in the <name>=<mode>
+# form. The valid <mode> values are 'none', 'latest', and 'all'. If 'all' is
+# specified for a toolchain name, then emails are sent according to the
+# build-*email package manifest values when all versions of a package are
+# built with this toolchain. If 'latest' is specified, then for this toolchain
+# name the emails are only sent for the latest version of a package. If 'none'
+# is specified, then no emails are sent for this toolchain name. By default
+# the 'latest' mode is assumed. Repeat this option to enable/disable emails
+# for multiple toolchains.
+#
+# build-toolchain-email <toolchain-name>=latest|none|all
+
+
+# The build database connection configuration. By default, brep will try to
+# connect to the local instance of PostgreSQL with the operating system-default
+# mechanism (Unix-domain socket, etc) and use operating system (login) user
+# name and the database called 'brep_build'. If the role name is not empty
+# then the login user will be switched (with SET ROLE) to this user prior
+# to executing any statements. If not specified, then 'brep' is used. See
+# brep(1) for details.
+#
+# build-db-user
+# build-db-role brep
+# build-db-password
+# build-db-name brep_build
+# build-db-host
+# build-db-port
+
+
+# The maximum number of concurrent build database connections per web server
+# process. If 0, then no limitation is applied.
+#
+# build-db-max-connections 5
+
+
+# The maximum number of times to retry build database transactions in the
+# face of recoverable failures (deadlock, loss of connection, etc).
+#
+# build-db-retry 10
+
+
+# The root directory where the uploaded binary distribution packages are
+# saved to under the following directory hierarchy:
+#
+# [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+#
+# The package configuration directory symlinks that match these paths are
+# mapped to web URLs based on the bindist-url value and displayed on the
+# package version details page. If this option is specified, then bindist-url
+# must be specified as well.
+#
+# bindist-root
+
+
+# The root URL of the directory specified with the bindist-root option. This
+# option must be specified if bindist-root is specified.
+#
+# bindist-url
+
+
+# The openssl program to be used for crypto operations. You can also specify
+# additional options that should be passed to the openssl program with
+# openssl-option. If the openssl program is not explicitly specified, then brep
+# will use openssl by default.
+#
+# openssl openssl
+
+
+# Additional option to be passed to the openssl program (see openssl for
+# details). Repeat this option to specify multiple openssl options.
+#
+# openssl-option
+
+
+# Environment variable to be set (<name>=<value>) or unset (just <name>) for
+# the openssl program (see openssl for details). Repeat this option to specify
+# multiple openssl variables. Note that unspecified variables are inherited
+# from the web server process.
+#
+# You need to at least set the RANDFILE environment variable to change the
+# default location of the openssl program seed file and maybe also the
+# OPENSSL_CONF variable if you would like to use a custom openssl configuration
+# file.
+#
+# openssl-envvar RANDFILE=/home/brep/www-data-openssl.rnd
+# openssl-envvar OPENSSL_CONF=/home/brep/www-data-openssl.cnf
+#
+# To create www-data-openssl.rnd with suitable permissions, run (as user brep):
+#
+# $ touch www-data-openssl.rnd
+# $ setfacl -b -m g:www-data:rw www-data-openssl.rnd
+#
+
+
+# The directory to save final submission data to. If unspecified, the package
+# submission functionality will be disabled. If specified, then submit-temp
+# must be specified as well.
+#
+# Note that the directory path must be absolute and the directory itself must
+# exist and have read, write, and execute permissions granted to the user that
+# runs the web server.
+#
+submit-data /home/brep/submit-data
+
+
+# The directory to save temporary submission data to. Must be specified if the
+# package submission functionality is enabled.
+#
+# Note that this directory must be on the same filesystem and satisfy the same
+# requirements as submit-data. It is also the user's responsibility to clean
+# it up after an unclean web server shutdown.
+#
+submit-temp /home/brep/submit-temp
+
+
+# The maximum size of the submission data accepted. Note that currently the
+# entire submission request is read into memory. Default is 10M.
+#
+# Here it is increased to 100M.
+#
+submit-max-size 104857600
+
+
+# The package submission form fragment. If specified, then its contents are
+# treated as an XHTML5 fragment that is inserted into the <body> element of
+# the submission page. If unspecified, then no submission page will be
+# displayed. Note that the file path must be absolute.
+#
+submit-form /home/brep/install/share/brep/www/submit.xhtml
+
+
+# The package submission email. If specified, the submission request and
+# result manifests will be sent to this address.
+#
+# submit-email
+
+
+# The handler program to be executed on package submission. The handler is
+# executed as part of the HTTP request and is passed additional arguments that
+# can be specified with submit-handler-argument followed by the absolute path
+# to the submission directory. Note that the program path must be absolute.
+#
+submit-handler /home/brep/install/bin/brep-submit-pub
+
+
+# Additional arguments to be passed to the submission handler program (see
+# submit-handler for details). Repeat this option to specify multiple
+# arguments.
+#
+submit-handler-argument --user
+submit-handler-argument brep
+submit-handler-argument --result-url
+submit-handler-argument http://unknown
+submit-handler-argument /home/brep/install/bin/brep-load
+submit-handler-argument --db-name=brep_submit_package
+submit-handler-argument /var/brep/bpkg/pkg
+
+
+# The handler program timeout in seconds. If specified and the handler does
+# not exit in the allotted time, then it is killed and its termination is
+# treated as abnormal.
+#
+submit-handler-timeout 120
+
+
+# The directory to save CI request data to. If unspecified, the package CI
+# functionality will be disabled.
+#
+# Note that the directory path must be absolute and the directory itself must
+# exist and have read, write, and execute permissions granted to the user that
+# runs the web server.
+#
+# ci-data
+
+
+# The package CI form fragment. If specified, then its contents are treated as
+# an XHTML5 fragment that is inserted into the <body> element of the CI page.
+# If unspecified, then no CI page will be displayed. Note that the file path
+# must be absolute.
+#
+# ci-form
+
+
+# The package CI email. If specified, the CI request and result manifests will
+# be sent to this address.
+#
+# ci-email
+
+
+# The handler program to be executed on CI request. The handler is executed as
+# part of the HTTP request and is passed additional arguments that can be
+# specified with ci-handler-argument followed by the absolute path to the CI
+# request directory. Note that the program path must be absolute.
+#
+# ci-handler
+
+
+# Additional arguments to be passed to the CI handler program (see ci-handler
+# for details). Repeat this option to specify multiple arguments.
+#
+# ci-handler-argument
+
+
+# The CI handler program timeout in seconds. If specified and the handler does
+# not exit in the allotted time, then it is killed and its termination is
+# treated as abnormal.
+#
+# ci-handler-timeout
+
+
+# The directory to save upload data to for the specified upload type. If
+# unspecified, the build artifacts upload functionality will be disabled for
+# this type.
+#
+# Note that the directory path must be absolute and the directory itself must
+# exist and have read, write, and execute permissions granted to the user that
+# runs the web server.
+#
+# upload-data <type>=<dir>
+
+
+# The maximum size of the upload data accepted for the specified upload type.
+# Note that currently the entire upload request is read into memory. The
+# default is 10M.
+#
+# upload-max-size <type>=10485760
+
+
+# The build artifacts upload email. If specified, the upload request and
+# result manifests will be sent to this address.
+#
+# upload-email <type>=<email>
+
+
+# The handler program to be executed on build artifacts upload of the
+# specified type. The handler is executed as part of the HTTP request and is
+# passed additional arguments that can be specified with
+# upload-handler-argument followed by the absolute path to the upload
+# directory (upload-data). Note that the program path must be absolute.
+#
+# upload-handler <type>=<path>
+
+
+# Additional arguments to be passed to the upload handler program for the
+# specified upload type (see upload-handler for details). Repeat this option
+# to specify multiple arguments.
+#
+# upload-handler-argument <type>=<arg>
+
+
+# The upload handler program timeout in seconds for the specified upload type.
+# If specified and the handler does not exit in the allotted time, then it is
+# killed and its termination is treated as abnormal.
+#
+# upload-handler-timeout <type>=<seconds>
+
+
+# Disable upload of the specified type for the specified toolchain name.
+# Repeat this option to disable uploads for multiple toolchains.
+#
+# upload-toolchain-exclude <type>=<name>
+
+
+# Disable upload of the specified type for packages from the repository with
+# the specified canonical name. Repeat this option to disable uploads for
+# multiple repositories.
+#
+# upload-repository-exclude <type>=<name>
+
+
+# The default view to display for the global repository root. The value is one
+# of the supported services (packages, builds, submit, ci, etc). Default is
+# packages.
+#
+# root-global-view packages
+
+
+# The default view to display for the tenant repository root. The value is one
+# of the supported services (packages, builds, submit, ci, etc). Default is
+# packages.
+#
+# root-tenant-view packages
+
+
+# Name to call the tenant values on web pages. If not specified, then 'tenant'
+# is used.
+#
+# tenant-name tenant
+
+
+# Trace verbosity. Disabled by default.
+#
+# verbosity 0
diff --git a/etc/private/install/brep-startup b/etc/private/install/brep-startup
new file mode 100755
index 0000000..780a2c0
--- /dev/null
+++ b/etc/private/install/brep-startup
@@ -0,0 +1,88 @@
+#! /usr/bin/env bash
+
+# file : etc/private/install/brep-startup
+# license : MIT; see accompanying LICENSE file
+
+# (Re-)initialize the brep private instance, normally on the machine startup.
+#
+# Specifically:
+#
+# - Create the pkg repository and symlink to it, unless already exists.
+#
+# - Migrate the brep databases as a sanity check.
+#
+# - Adjust the brep module configuration file using the current host name/IP.
+#
+# - Generate the loadtab using the current host name/IP and run the loader.
+#
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "error: $*"; exit 1; }
+
+# Create the pkg repository, if required.
+#
+d=/var/brep/bpkg
+
+if [ ! -L "$d/pkg" ]; then
+ rd="$(date "+pkg-%Y%m%d-%H%M%S-%N")"
+
+ mkdir -p "$d/$rd/1"
+ ln -s "$rd" "$d/pkg"
+fi
+
+r="$d/pkg/1"
+
+if [ ! -f "$r/repositories.manifest" ]; then
+ cat <<EOF >"$r/repositories.manifest"
+: 1
+#summary: Private repository
+#description: \\
+#This is a private repository.
+#And this description can contain multiple lines.
+#\\
+#email: admin@example.org
+
+#:
+#role: prerequisite
+#location: https://pkg.cppget.org/1/stable
+#trust: ...
+EOF
+fi
+
+if [ ! -f "$r/packages.manifest" ]; then
+ bpkg rep-create -q "$r"
+fi
+
+# Migrate the databases.
+#
+"$HOME/install/bin/brep-migrate" package
+"$HOME/install/bin/brep-migrate" build
+"$HOME/install/bin/brep-migrate" -n brep_submit_package package
+
+# Deduce the machine host name.
+#
+h="$(hostname -f)"
+if [ "$h" == "localhost" ]; then
+ h="$(hostname -I | sed 's/ *$//')" # Strip the potential trailing space(s).
+fi
+
+if [ -z "$h" ]; then
+ error "unable to obtain host name or IP address"
+fi
+
+# Adjust the submission result URL host name in the brep module configuration
+# file.
+#
+sed --in-place -re \
+"\$!N;s%^\s*(submit-handler-argument\s+--result-url\s*\\n)\
+\s*(submit-handler-argument\s+https?://)[^/]*(.*)\$%\1\2$h\3%;P;D" \
+"$HOME/config/brep-module.conf"
+
+# (Re-)generate the loadtab file and reload the repository.
+#
+f="$HOME/config/loadtab"
+
+echo "http://$h/1 private cache:$r" >"$f"
+"$HOME/install/bin/brep-load" "$f"
diff --git a/etc/private/install/brep-startup.service b/etc/private/install/brep-startup.service
new file mode 100644
index 0000000..a3dc546
--- /dev/null
+++ b/etc/private/install/brep-startup.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=brep instance initialization service
+
+Wants=network-online.target
+After=network-online.target
+
+Requires=postgresql.service
+After=postgresql.service
+
+[Service]
+Type=oneshot
+User=brep
+Group=brep
+ExecStart=/home/brep/bin/brep-startup
+
+[Install]
+WantedBy=default.target
diff --git a/etc/private/install/vm-gen-service b/etc/private/install/vm-gen-service
new file mode 100755
index 0000000..ae49a49
--- /dev/null
+++ b/etc/private/install/vm-gen-service
@@ -0,0 +1,207 @@
+#! /usr/bin/env bash
+
+# Generate systemd .service file for QEMU/KVM virtual machines.
+#
+# Normally the machines are run from a dedicated user account with its home
+# directory containing all the relevant files (management scripts, images,
+# configurations, and sockets). However, this can be overridden with the
+# following options:
+#
+# --home <dir>
+# The virtual machines "home" directory. If unspecified, the user's home
+# directory is assumed.
+#
+# --bin <dir>
+# The virtual machines management scripts directory. If unspecified,
+# <home>/bin is assumed. If specified as relative, then assumed relative
+# to <home>.
+#
+# --etc <dir>
+# The virtual machines configuration files directory. If unspecified,
+# <home>/etc is assumed. If specified as relative, then assumed relative
+# to <home>.
+#
+# --var <dir>
+# The virtual machines image files directory. If unspecified, <home>/var is
+# assumed. If specified as relative, then assumed relative to <home>.
+#
+# --run <dir>
+# The virtual machines sockets directory. If unspecified, <home>/run is
+# assumed. If specified as relative, then assumed relative to <home>.
+#
+# If <user> is unspecified, the current user is assumed. If <group> is
+# unspecified, the user's primary group is assumed.
+#
+usage="usage: $0 [<options>] [<user>] [<group>]"
+
+owd="$(pwd)"
+trap "{ cd '$owd'; exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+home=
+bin=
+etc=
+var=
+run=
+
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --home)
+ shift
+ home="$1"
+ shift
+ ;;
+ --bin)
+ shift
+ bin="$1"
+ shift
+ ;;
+ --etc)
+ shift
+ etc="$1"
+ shift
+ ;;
+ --var)
+ shift
+ var="$1"
+ shift
+ ;;
+ --run)
+ shift
+ run="$1"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+user="$1"
+
+if [ -z "$user" ]; then
+ user="$(id -un)"
+fi
+
+group="$2"
+
+if [ -z "$group" ]; then
+  group="$(id -gn "$user")"
+fi
+
+if [ -z "$home" ]; then
+ home="$(eval echo ~$user)"
+fi
+
+function complete_dir () # <default> <home> <dir>
+{
+ local r
+ if [ -z "$3" ]; then
+ r="$2/$1"
+ elif [ "${3:0:1}" != "/" ]; then
+ r="$2/$3"
+ else
+ r="$3"
+ fi
+ echo "$(realpath --no-symlinks --canonicalize-missing "$r")"
+}
+
+bin="$(complete_dir bin "$home" "$bin")"
+etc="$(complete_dir etc "$home" "$etc")"
+var="$(complete_dir var "$home" "$var")"
+run="$(complete_dir run "$home" "$run")"
+
+name="vm-$user"
+file="$name@.service"
+
+# Things that must be \-escaped:
+#
+# - $ (including in comments)
+# - \ (e.g., in line continuations)
+#
+cat <<EOF >"$file"
+# $file -- QEMU/KVM machine service template for systemd
+#
+# user: $user
+# group: $group
+# bin: $bin
+# etc: $etc
+# var: $var
+# run: $run
+#
+# To install:
+#
+# sudo cp $file /etc/systemd/system/
+# sudo chmod 644 /etc/systemd/system/$file
+#
+# cp ... $var/<machine>.img
+# nano $etc/<machine>.conf # Specify RAM, CPU, TAP, MAC, etc.
+#
+# sudo systemctl start $name@<machine>
+# sudo systemctl status $name@<machine>
+# login-machine $run/<machine>-con.sock
+# sudo systemctl stop $name@<machine>
+#
+# sudo systemctl enable $name@<machine>
+
+[Unit]
+Description=QEMU/KVM virtual machine %I
+
+Wants=network-online.target
+#After=network-online.target
+After=multi-user.target
+
+[Service]
+User=$user
+Group=$group
+UMask=0007
+WorkingDirectory=~
+
+Environment=CPU=1
+Environment=RAM=2G
+
+# These MUST be specified in EnvironmentFile!
+#
+#Environment=TAP=
+#Environment=MAC=
+
+# Note that using variable expansion in EnvironmentFile does not work (at
+# least not with systemd 229).
+#
+EnvironmentFile=$etc/%i.conf
+
+# Note that the first word of ExecStart cannot contain variable expansions.
+#
+ExecStart=$bin/vm-start \\
+ --cpu \${CPU} \\
+ --ram \${RAM} \\
+ --tap \${TAP} \\
+ --mac \${MAC} \\
+ --pid $run/%i.pid \\
+ --monitor $run/%i-mon.sock \\
+ --console $run/%i-con.sock \\
+ $var/%i.img
+
+ExecStop=$bin/vm-stop $run/%i.pid $run/%i-mon.sock
+
+# This makes sure systemd waits for the ExecStart command to exit rather
+# than killing it as soon as ExecStop exits (this is necessary since our
+# vm-stop may exit just before vm-start).
+#
+KillMode=none
+TimeoutStopSec=60
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+info "generated $file for"
+info " user: $user"
+info " group: $group"
+info " bin: $bin"
+info " etc: $etc"
+info " var: $var"
+info " run: $run"
diff --git a/etc/private/systemd-networkd/10-br0.netdev b/etc/private/systemd-networkd/10-br0.netdev
new file mode 100644
index 0000000..6431ba8
--- /dev/null
+++ b/etc/private/systemd-networkd/10-br0.netdev
@@ -0,0 +1,8 @@
+# Create a bridge network device.
+#
+# Use ethernet interface's MAC address as bridge MAC.
+
+[NetDev]
+Name=br0
+Kind=bridge
+MACAddress=02:11:11:11:11:11
diff --git a/etc/private/systemd-networkd/10-tap0.netdev b/etc/private/systemd-networkd/10-tap0.netdev
new file mode 100644
index 0000000..3989bd8
--- /dev/null
+++ b/etc/private/systemd-networkd/10-tap0.netdev
@@ -0,0 +1,12 @@
+# Create a tap network device.
+#
+# Set user/group to the user/group that will be using the tap
+# (e.g., the user that will run the VM that will use this tap).
+
+[NetDev]
+Name=tap0
+Kind=tap
+
+[Tap]
+#User=
+#Group=
diff --git a/etc/private/systemd-networkd/20-br0-eth0.network b/etc/private/systemd-networkd/20-br0-eth0.network
new file mode 100644
index 0000000..c57736f
--- /dev/null
+++ b/etc/private/systemd-networkd/20-br0-eth0.network
@@ -0,0 +1,12 @@
+# Add the ethernet interface to the bridge.
+#
+# Change eth0 to your ethernet interface name.
+
+[Match]
+Name=eth0
+
+[Network]
+Bridge=br0
+
+[Link]
+RequiredForOnline=no
diff --git a/etc/private/systemd-networkd/20-br0-tap0.network b/etc/private/systemd-networkd/20-br0-tap0.network
new file mode 100644
index 0000000..1c2c746
--- /dev/null
+++ b/etc/private/systemd-networkd/20-br0-tap0.network
@@ -0,0 +1,16 @@
+# Add the tap interface to the bridge.
+#
+# Note: do not assign MAC address to the tap interface, it's not the same
+# thing as the interface inside the VM (which is what we want to assign the
+# MAC address to).
+#
+
+[Match]
+Name=tap0
+
+[Network]
+Bridge=br0
+#ConfigureWithoutCarrier=yes
+
+[Link]
+RequiredForOnline=no
diff --git a/etc/private/systemd-networkd/30-br0-dhcp.network b/etc/private/systemd-networkd/30-br0-dhcp.network
new file mode 100644
index 0000000..211d870
--- /dev/null
+++ b/etc/private/systemd-networkd/30-br0-dhcp.network
@@ -0,0 +1,17 @@
+# Configure the bridge with IPv4 DHCP.
+
+[Match]
+Name=br0
+
+[Network]
+DHCP=ipv4
+IPForward=yes
+
+[DHCPv4]
+#UseHostname=yes
+
+#SendHostname=yes
+#Hostname=example.lan
+
+[Link]
+RequiredForOnline=yes
diff --git a/etc/private/systemd-networkd/README b/etc/private/systemd-networkd/README
new file mode 100644
index 0000000..48bb7cd
--- /dev/null
+++ b/etc/private/systemd-networkd/README
@@ -0,0 +1,106 @@
+This directory contains sample configuration files for setting up a bridge
+(br0) and a permanent tap interface (tap0) using systemd's networkd network
+manager. The tap interface can be used, for example, to run a virtual machine
+that appears as a real machine on the host's Ethernet network.
+
+Assumptions:
+
+ - The host uses Ethernet for networking.
+
+ - The host uses IPv4 DHCP for network configuration.
+
+Note: only perform the following steps over a physical login to the host since
+the configuration involves bringing the host's networking down.
+
+Note: commands that start with the `#` prompt must be executed as root.
+
+1. Switch to systemd-networkd for network configuration.
+
+Overall, the goal of this step is to disable the currently used network
+manager and enable systemd-networkd. First check if systemd-networkd is
+already used:
+
+# systemctl status systemd-networkd
+
+If it's enabled and running, skip to step 2. Otherwise, identify the currently
+used network manager. The possible options depend on the distribution used so
+consult the relevant documentation for details. One common option is the GNOME
+network manager:
+
+# systemctl status NetworkManager
+
+If it's enabled and running, stop and disable:
+
+# systemctl stop NetworkManager
+# systemctl disable NetworkManager
+
+For Debian-based distributions a common approach is to define the network
+configuration in the /etc/network/interfaces file. To disable this method,
+perform the following steps:
+
+# systemctl stop networking
+# mv /etc/network/interfaces /etc/network/interfaces.disabled
+
+Once the current network manager is disabled, proceed to step 2.
+
+
+2. Configure bridged networking using systemd-networkd.
+
+Copy configuration files found in this directory to /etc/systemd/network/ (see
+the comment at the beginning of each file for its purpose):
+
+# cp *.netdev *.network /etc/systemd/network/
+
+Note: if you are already using systemd-networkd, then you may already have
+some configuration in /etc/systemd/network/. If the existing configuration
+conflicts with this setup (for example, you already have a configuration for
+the Ethernet interface), then you will need to remove the relevant files.
+
+Then adjust the following to match your setup:
+
+ - Ethernet interface name if not eth0: 20-br0-eth0.network (both name and
+ content)
+
+ Use the following command to list all network interfaces:
+
+ # ip link show
+
+ - Bridge MAC address: 10-br0.netdev
+
+ Use your Ethernet interface's address as your bridge address, which
+ you can obtain with:
+
+ # ip link show eth0
+
+ - Tap user/group: 10-tap0.netdev
+
+ For example, set to the user/group that will run the VM that will use this
+ tap interface.
+
+
+3. Test and enable networking using systemd-networkd.
+
+Once the configuration is complete, start/restart systemd-networkd and verify
+networking is configured correctly.
+
+# systemctl restart systemd-networkd
+# systemctl status systemd-networkd
+# ip addr show br0
+# ip addr show tap0
+
+Specifically, verify that:
+
+ - The br0 MAC address is correct.
+
+ - The br0 interface is assigned (via DHCP) an IP address and, if a fixed
+ IP is used, it's what's expected.
+
+ - Try to ping example.org to confirm the overall network (routing, DNS)
+ is functional.
+
+If everything looks good, enable systemd-networkd:
+
+# systemctl enable systemd-networkd
+
+You may also want to reboot the host and perform the above verifications
+one more time.
diff --git a/etc/private/vm-gen-macaddress b/etc/private/vm-gen-macaddress
new file mode 100755
index 0000000..c13a993
--- /dev/null
+++ b/etc/private/vm-gen-macaddress
@@ -0,0 +1,60 @@
+#! /usr/bin/env bash
+
+# Generate a locally administered MAC address (LAA) number <num> based on the
+# specified universally administered address <mac> (UAA, for example, an
+# address corresponding to the host's physical Ethernet interface).
+#
+# Specifically, the resulting address is formed by combining the
+# LAA-conforming first octet with the subsequent five octets from <mac>:
+#
+# x[26ae]:xx:xx:xx:xx:xx
+#
+# The first octet is derived from <num> as follows:
+#
+# 0-15 : 02-f2
+# 16-31 : 06-f6
+# 32-47 : 0a-fa
+# 48-63 : 0e-fe
+#
+# For example, <num> can correspond to the interface number, such as tap0, for
+# which the resulting MAC address will be used.
+#
+usage="usage: $0 <mac> <num>"
+
+owd="$(pwd)"
+trap "{ cd '$owd'; exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+if [ -z "$1" ]; then
+ error "$usage"
+fi
+
+o='[0-9a-fA-F]'
+mac="$(sed -nr -e "s/^$o$o:($o$o:$o$o:$o$o:$o$o:$o$o)$/\1/p" <<<"$1")"
+
+if [ -z "$mac" ]; then
+ error "invalid MAC address '$1'"
+fi
+
+if [ -z "$2" ]; then
+ error "$usage"
+fi
+
+num="$2"
+
+if (( num < 0 || num > 63 )); then
+ error "number '$num' is out of 0-63 range"
+fi
+
+if (( num < 16 )); then
+ printf "%x2:%s\n" $(( num )) "$mac"
+elif (( num < 32 )); then
+ printf "%x6:%s\n" $(( num - 16 )) "$mac"
+elif (( num < 48 )); then
+ printf "%xa:%s\n" $(( num - 32 )) "$mac"
+else
+ printf "%xe:%s\n" $(( num - 48 )) "$mac"
+fi
diff --git a/etc/private/vm-login b/etc/private/vm-login
new file mode 100755
index 0000000..28e8864
--- /dev/null
+++ b/etc/private/vm-login
@@ -0,0 +1,33 @@
+#! /usr/bin/env bash
+
+# Get virtual machine console (using screen).
+#
+# Note: use Ctrl-a k to exit screen (or Ctrl-a a k if running inside screen).
+#
+usage="usage: $0 <console-socket>"
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+con="$1"
+
+if [ -z "$con" ]; then
+ error "missing console socket"
+fi
+
+pty="$(dirname "$con")/$(basename -s .sock "$con").pty"
+
+socat "UNIX-CONNECT:$con" "PTY,link=$pty" &
+pid="$!"
+
+# Hack around terminal permission issue when running under `su - <user>`.
+#
+script -q -c "screen $pty" /dev/null
+
+# Note: socat may have already terminated (e.g., VM was shut down).
+#
+kill "$pid" 2>/dev/null || true
+wait
diff --git a/etc/private/vm-start b/etc/private/vm-start
new file mode 100755
index 0000000..41c4247
--- /dev/null
+++ b/etc/private/vm-start
@@ -0,0 +1,98 @@
+#! /usr/bin/env bash
+
+# file : etc/private/vm-start
+# license : MIT; see accompanying LICENSE file
+
+# Start the brep virtual machine (VM) for installing or running the previously
+# installed brep private instance. Must be executed on the host as brep user.
+#
+# Share with the VM the brep state directory as the 9p filesystem with the
+# passthrough security model enabled. This directory is expected to be owned
+# by the brep user and either contain the pkg repository maintained by the
+# brep instance or be empty, in which case the empty repository will be
+# automatically initialized.
+#
+# Note that you can signal to the VM to regenerate the repository on startup
+# (e.g., after package removal) by removing the packages.manifest file from
+# the repository.
+#
+# Options:
+#
+# --state <dir>
+#
+# State directory to share with the VM. If unspecified, $HOME/state is
+# assumed.
+#
+# --install <dir>
+#
+# Also share with the VM the install directory (contains the brep private
+# instance installation script and auxiliary files).
+#
+# Note that this script wraps the generic vm-start-base script and passes
+# through any arguments that follows these options to that script.
+#
+usage="usage: $0 [<options>] [<base-options>] <machine-img> [<extra-qemu-options>]"
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap ERR in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+install=
+state="$HOME/state"
+
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --install)
+ shift
+ install="${1%/}"
+ shift
+ ;;
+ --state)
+ shift
+ state="${1%/}"
+ shift
+ ;;
+ *)
+ break # The end of options is encountered.
+ ;;
+ esac
+done
+
+if [ "$#" -eq 0 ]; then
+ error "missing machine image"
+fi
+
+# Verify the state directory existence.
+#
+if [ ! -d "$state" ]; then
+ error "state directory '$state' does not exist or is not a directory"
+fi
+
+# Compute the start and QEMU options.
+#
+start_ops=()
+qemu_ops=(\
+ -fsdev "local,id=state,path=$state,security_model=passthrough" \
+ -device "virtio-9p-pci,fsdev=state,mount_tag=state")
+
+if [ -n "$install" ]; then
+
+ # Verify the toolchain install script existence in the install directory.
+ #
+ if [ ! -f "$(echo "$install"/build2-install-*.sh)" ]; then
+ error "missing toolchain installation script in '$install' directory"
+ fi
+
+ start_ops+=(--stdio)
+ qemu_ops+=(\
+ -fsdev "local,id=install,path=$install,security_model=passthrough" \
+ -device "virtio-9p-pci,fsdev=install,mount_tag=install")
+fi
+
+# Finally, forward execution to the base script.
+#
+scr_dir="$(dirname "$(realpath "${BASH_SOURCE[0]}")")"
+
+exec "$scr_dir/vm-start-base" "${start_ops[@]}" "$@" "${qemu_ops[@]}"
diff --git a/etc/private/vm-start-base b/etc/private/vm-start-base
new file mode 100755
index 0000000..4a81661
--- /dev/null
+++ b/etc/private/vm-start-base
@@ -0,0 +1,206 @@
+#! /usr/bin/env bash
+
+# Start a QEMU/KVM virtual machine.
+#
+# --cpu <num>
+# CPU hardware threads to allocate to the VM, 1 by default.
+#
+# --ram <num>
+# RAM to allocate to the VM, 2G by default (can be specified with G,
+# M suffixes).
+#
+# --tap <tap>
+# Existing tap interface to use instead of creating a new one.
+#
+# --mac <addr>
+# MAC address to use for the machine.
+#
+# --pid <path>
+# PID file path, /tmp/vm-<tap>.pid if unspecified.
+#
+# --monitor <path>
+# Monitor UNIX socket path, /tmp/vm-<tap>-mon.sock if unspecified.
+#
+# --console <path>
+# Console UNIX socket path, /tmp/vm-<tap>-con.sock if unspecified.
+#
+# --stdio
+# Connect both console and monitor to stdio (multiplexed). This disables
+# the creation of the monitor and console sockets.
+#
+# --stdio-monitor
+# Connect only monitor to stdio. This disables the creation of the monitor
+# socket.
+#
+usage="usage: $0 [<options>] <vm-img> [<extra-qemu-options>]"
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+qemu=(qemu-system-x86_64 -enable-kvm)
+
+# The bridge is only used if we are creating the tap.
+#
+br=br0
+
+cpu=1
+ram=2G
+tap=
+mac="de:ad:be:ef:b8:da"
+pid=
+mon=
+con=
+stdio=
+stdio_monitor=
+
+while [ "$#" -gt 0 ]; do
+ case "$1" in
+ --cpu)
+ shift
+ cpu="$1"
+ shift
+ ;;
+ --ram)
+ shift
+ ram="$1"
+ shift
+ ;;
+ --tap)
+ shift
+ tap="$1"
+ shift
+ ;;
+ --mac)
+ shift
+ mac="$1"
+ shift
+ ;;
+ --pid)
+ shift
+ pid="$1"
+ shift
+ ;;
+ --monitor)
+ shift
+ mon="$1"
+ shift
+ ;;
+ --console)
+ shift
+ con="$1"
+ shift
+ ;;
+ --stdio)
+ stdio=true
+ stdio_monitor=
+ shift
+ ;;
+ --stdio-monitor)
+ stdio=
+ stdio_monitor=true
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+img="$1"
+shift
+
+if [ -z "$img" ]; then
+ error "missing virtual machine image"
+fi
+
+if [ ! -f "$img" ]; then
+ error "virtual machine image '$img' does not exist"
+fi
+
+# Open the reading file descriptor and lock the machine image. Fail if unable
+# to lock.
+#
+# Note that the file descriptor is automatically closed on the script exit and
+# the lock is released.
+#
+exec {lfd}<"$img"
+
+if ! flock -n "$lfd"; then
+ error "virtual machine image is already in use"
+fi
+
+del_tap=
+if [ -z "$tap" ]; then
+ tap=tap9
+ sudo ip tuntap delete "$tap" mode tap || true
+ sudo ip tuntap add "$tap" mode tap user "$(whoami)"
+ sudo ip link set "$tap" up
+ #sleep 0.5s
+ sudo ip link set "$tap" master "$br"
+ del_tap=true
+fi
+
+if [ -z "$pid" ]; then
+ pid="/tmp/vm-$tap.pid"
+fi
+echo "$$" >"$pid"
+
+if [ -z "$mon" ]; then
+ mon="/tmp/vm-$tap-mon.sock"
+fi
+
+if [ -z "$con" ]; then
+ con="/tmp/vm-$tap-con.sock"
+fi
+
+ops=(\
+ -m "$ram" \
+ -cpu host -smp "$cpu,sockets=1,cores=$cpu,threads=1" \
+ \
+ -netdev "tap,id=net0,ifname=$tap,script=no" \
+ -device "virtio-net-pci,netdev=net0,mac=$mac" \
+ \
+ -drive "if=none,id=disk0,file=$img,format=raw" \
+ -device "virtio-blk-pci,scsi=off,drive=disk0" \
+ \
+ -nographic \
+)
+
+# Console/monitor options.
+#
+if [ "$stdio" ]; then
+ # Multiplex the monitor and serial console onto stdio. In particular, this
+  # makes sure Ctrl-c is passed to the guest (rather than terminating the QEMU
+ # process). To switch between monitor and console, Ctrl-a,c (to terminate
+ # QEMU, type quit in the monitor).
+ #
+ ops+=(-serial mon:stdio)
+else
+ # Monitor.
+ #
+ if [ "$stdio_monitor" ]; then
+ ops+=(-chardev stdio,id=mon)
+ else
+ ops+=(-chardev "socket,id=mon,path=$mon,server,nowait")
+ fi
+
+ ops+=(-mon chardev=mon,mode=readline)
+
+ # Console.
+ #
+ ops+=(-chardev "socket,id=con,path=$con,server,nowait" \
+ -serial chardev:con)
+fi
+
+"${qemu[@]}" "${ops[@]}" -boot c "$@"
+
+if [ "$pid" -o "$mon" -o "$con" ]; then
+ rm -f "$pid" "$mon" "$con"
+fi
+
+if [ "$del_tap" ]; then
+ sudo ip tuntap delete "$tap" mode tap
+fi
diff --git a/etc/private/vm-stop b/etc/private/vm-stop
new file mode 100755
index 0000000..cf64dee
--- /dev/null
+++ b/etc/private/vm-stop
@@ -0,0 +1,37 @@
+#! /usr/bin/env bash
+
+# Stop virtual machine started with vm-start.
+#
+usage="usage: $0 <pid-file> <monitor-socket>"
+
+trap "{ exit 1; }" ERR
+set -o errtrace # Trap in functions.
+
+function info () { echo "$*" 1>&2; }
+function error () { info "$*"; exit 1; }
+
+
+if [ -z "$1" -o ! -f "$1" ]; then
+ error "missing or invalid PID file"
+fi
+
+pid="$(sed -nr -e 's/([0-9]+)/\1/p' "$1")"
+
+if [ -z "$pid" ]; then
+ error "PID file $1 does not contain valid PID"
+fi
+
+if [ -z "$2" -o ! -S "$2" ]; then
+ error "missing or invalid monitor socket"
+fi
+
+mon="$2"
+
+echo system_powerdown | socat - "UNIX-CONNECT:$mon" >/dev/null
+
+# An alternative way to implement this would be to connect a pipe to the
+# monitor socket and wait for it to be closed.
+#
+while [ -e "/proc/$pid" ]; do
+ sleep 0.2
+done
diff --git a/etc/proxy-apache2.conf b/etc/proxy-apache2.conf
new file mode 100644
index 0000000..fc7cfea
--- /dev/null
+++ b/etc/proxy-apache2.conf
@@ -0,0 +1,144 @@
+# Paste the following fragment into the <VirtualHost> section intended for
+# proxying HTTP(S) requests and caching the responses. See INSTALL-PROXY for
+# details.
+#
+# List of modules used:
+#
+# rewrite
+# headers
+# ssl
+# proxy
+# proxy_http
+# cache
+# cache_disk
+#
+
+ # Enable the rewrite rules functionality.
+ #
+ <IfModule !rewrite_module>
+ Error "rewrite_module is not enabled"
+ </IfModule>
+
+ RewriteEngine on
+ RewriteOptions AllowAnyURI
+
+ # Make sure that the HTTP header management functionality is enabled.
+ #
+ <IfModule !headers_module>
+ Error "headers_module is not enabled"
+ </IfModule>
+
+ # Enable the HTTP proxy.
+ #
+ <IfModule !proxy_module>
+ Error "proxy_module is not enabled"
+ </IfModule>
+
+ <IfModule !proxy_http_module>
+ Error "proxy_http_module is not enabled"
+ </IfModule>
+
+ ProxyRequests On
+
+ # Enable SSL/TLS API usage for querying HTTPS URLs.
+ #
+ <IfModule !ssl_module>
+ Error "ssl_module is not enabled"
+ </IfModule>
+
+ SSLProxyEngine on
+
+ # Optional: prevent non-authorized proxy usage, for example:
+ #
+ # <Proxy *>
+ # Require ip 10.5
+ # </Proxy>
+
+ # Accept only the HTTP GET method and respond with the 403 HTTP status
+ # code (Forbidden) for other methods.
+ #
+ RewriteCond %{REQUEST_METHOD} !GET
+ RewriteRule .* - [F]
+
+ # Optional: restrict the URL set allowed for proxying, for example:
+ #
+ # RewriteCond %{HTTP_HOST} !(.+\.)?example.org
+ # RewriteRule .* - [F]
+
+ # Convert the http scheme to https for URLs being proxied.
+ #
+ # To prevent the conversion we can exclude certain hosts. For example:
+ #
+ # RewriteCond %{HTTP_HOST} !(.+\.)?example.org [OR]
+ # RewriteCond %{HTTP_HOST} !(.+\.)?example.net
+ #
+ # Or check for a custom header value. Note that this header should not
+ # be forwarded to the origin server. For example:
+ #
+ # RewriteCond %{HTTP:X-Preserve-HTTP} !(1|on|true) [NC]
+ # RequestHeader unset X-Preserve-HTTP
+ #
+ RewriteRule ^proxy:http://(.*)$ "https://$1" [P]
+
+ # Enable the disk storage-based cache.
+ #
+ <IfModule !cache_module>
+ Error "cache_module is not enabled"
+ </IfModule>
+
+ <IfModule !cache_disk_module>
+ Error "cache_disk_module is not enabled"
+ </IfModule>
+
+ CacheEnable disk "http://"
+
+ # Specify the cache root directory and make sure it is writable by the
+ # user under which Apache2 is running.
+ #
+ # Note that if there are no other proxies enabled for the WEB server,
+ # you can probably specify (you still have to specify it) the default
+ # cache directory (/var/cache/apache2/mod_cache_disk for Debian/Ubuntu
+ # and /var/cache/httpd/proxy for Fedora/RHEL).
+ #
+ CacheRoot
+
+ # Cache entry maximum size (in bytes).
+ #
+ CacheMaxFileSize 100000000
+
+ # Prevent duplicate caching of responses for the same simultaneously
+ # proxied URL. Specify an appropriate per-URL lock timeout (in
+ # seconds) to avoid stalled downloads from keeping the entries
+ # uncached.
+ #
+ CacheLock on
+ CacheLockMaxAge 600
+
+ # Always validate an existing cache entry by querying the origin
+ # server.
+ #
+ # We do this by injecting the request header which always declares the
+ # existing cache entry as potentially stale (ignoring Expire response
+ # header and Cache-Control header's max-age field) which should also
+ # be propagated through all the upstream proxies forcing them to
+ # validate the resource freshness.
+ #
+ # Note that this relies on both the proxy and origin servers correctly
+ # supporting conditional requests based on entity tags (ETag HTTP
+ # response and If-None-Match HTTP request headers) or less accurate
+ # entity modification times (Last-Modified HTTP response and
+ # If-Modified-Since HTTP request headers), which is normally the case
+ # if both are running Apache. A proxy normally caches the ETag and/or
+ # Last-Modified response header values alongside the cached entity and
+ # adds If-None-Match and/or If-Modified-Since headers respectively to
+ # the entity validation request. An origin server normally checks if
+ # any of the ETag or Last-Modified headers changed for the entity and
+ # responds with its full content, if that's the case, or with the 304
+ # HTTP status code (Not Modified) otherwise (see the Apache Caching
+ # Guide for details).
+ #
+ # Also note that to observe the injected header the cache handler
+ # should not be configured as a quick handler.
+ #
+ RequestHeader set Cache-Control max-age=0
+ CacheQuickHandler off
diff --git a/etc/systemd/brep-clean.service b/etc/systemd/brep-clean.service
index 739a54a..d2e5630 100644
--- a/etc/systemd/brep-clean.service
+++ b/etc/systemd/brep-clean.service
@@ -1,5 +1,5 @@
[Unit]
-Description=brep build database cleaner service
+Description=brep build database and artifacts cleaner service
[Service]
Type=oneshot
@@ -7,9 +7,12 @@ Type=oneshot
#Group=brep
# Run both tenants and builds cleaners if CI request functionality is enabled.
+# Also run outdated build artifacts cleaners if build artifacts upload
+# functionality is enabled.
#
#ExecStart=/home/brep/install/bin/brep-clean tenants 240
ExecStart=/home/brep/install/bin/brep-clean builds /home/brep/config/buildtab
+#ExecStart=/home/brep/install/bin/brep-upload-bindist-clean /var/bindist 2880
[Install]
WantedBy=default.target
diff --git a/etc/systemd/brep-clean.timer b/etc/systemd/brep-clean.timer
index f4c587e..8e1e6e7 100644
--- a/etc/systemd/brep-clean.timer
+++ b/etc/systemd/brep-clean.timer
@@ -10,9 +10,9 @@ Unit=brep-clean.service
#
Persistent=false
-# Wait 20 seconds until the first run.
+# Wait 30 seconds until the first run.
#
-OnBootSec=20
+OnBootSec=30
# Then wait 5 minutes until the next run.
#
diff --git a/etc/systemd/brep-monitor.service b/etc/systemd/brep-monitor.service
new file mode 100644
index 0000000..0a5c25e
--- /dev/null
+++ b/etc/systemd/brep-monitor.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=brep infrastructure monitor service
+
+[Service]
+Type=oneshot
+#User=brep
+#Group=brep
+
+# Replace the public toolchain name with a real list of toolchains.
+#
+ExecStart=/home/brep/install/bin/brep-monitor --report-timeout 86400 --clean /home/brep/config/brep-module.conf public
+
+[Install]
+WantedBy=default.target
diff --git a/etc/systemd/brep-monitor.timer b/etc/systemd/brep-monitor.timer
new file mode 100644
index 0000000..f5f5a64
--- /dev/null
+++ b/etc/systemd/brep-monitor.timer
@@ -0,0 +1,23 @@
+[Unit]
+Description=brep infrastructure monitor timer
+RefuseManualStart=no
+RefuseManualStop=no
+
+[Timer]
+Unit=brep-monitor.service
+
+# Don't keep track of the timer across reboots.
+#
+Persistent=false
+
+# Wait 40 seconds until the first run.
+#
+OnBootSec=40
+
+# Then wait 1 hour until the next run.
+#
+OnUnitInactiveSec=1h
+
+
+[Install]
+WantedBy=timers.target
diff --git a/libbrep/build-extra.sql b/libbrep/build-extra.sql
index 9ecbcb1..9e51a51 100644
--- a/libbrep/build-extra.sql
+++ b/libbrep/build-extra.sql
@@ -6,27 +6,62 @@
-- package-extra.sql file for details.
--
+DROP FOREIGN TABLE IF EXISTS build_package_config_bot_keys;
+
+DROP FOREIGN TABLE IF EXISTS build_package_config_auxiliaries;
+
+DROP FOREIGN TABLE IF EXISTS build_package_config_constraints;
+
+DROP FOREIGN TABLE IF EXISTS build_package_config_builds;
+
+DROP FOREIGN TABLE IF EXISTS build_package_configs;
+
+DROP FOREIGN TABLE IF EXISTS build_package_bot_keys;
+
+DROP FOREIGN TABLE IF EXISTS build_package_auxiliaries;
+
DROP FOREIGN TABLE IF EXISTS build_package_constraints;
DROP FOREIGN TABLE IF EXISTS build_package_builds;
+DROP FOREIGN TABLE IF EXISTS build_package_tests;
+
+DROP FOREIGN TABLE IF EXISTS build_package_requirement_alternative_requirements;
+
+DROP FOREIGN TABLE IF EXISTS build_package_requirement_alternatives;
+
+DROP FOREIGN TABLE IF EXISTS build_package_requirements;
+
DROP FOREIGN TABLE IF EXISTS build_package;
+DROP FOREIGN TABLE IF EXISTS build_public_key;
+
DROP FOREIGN TABLE IF EXISTS build_repository;
DROP FOREIGN TABLE IF EXISTS build_tenant;
-- The foreign table for build_tenant object.
--
---
CREATE FOREIGN TABLE build_tenant (
id TEXT NOT NULL,
- archived BOOLEAN NOT NULL)
+ private BOOLEAN NOT NULL,
+ interactive TEXT NULL,
+ archived BOOLEAN NOT NULL,
+ service_id TEXT NULL,
+ service_type TEXT NULL,
+ service_data TEXT NULL,
+ queued_timestamp BIGINT NULL,
+ toolchain_name TEXT OPTIONS (column_name 'build_toolchain_name') NULL,
+ toolchain_version_epoch INTEGER OPTIONS (column_name 'build_toolchain_version_epoch') NULL,
+ toolchain_version_canonical_upstream TEXT OPTIONS (column_name 'build_toolchain_version_canonical_upstream') NULL,
+ toolchain_version_canonical_release TEXT OPTIONS (column_name 'build_toolchain_version_canonical_release') NULL,
+ toolchain_version_revision INTEGER OPTIONS (column_name 'build_toolchain_version_revision') NULL,
+ toolchain_version_upstream TEXT OPTIONS (column_name 'build_toolchain_version_upstream') NULL,
+ toolchain_version_release TEXT OPTIONS (column_name 'build_toolchain_version_release') NULL)
SERVER package_server OPTIONS (table_name 'tenant');
-- The foreign table for build_repository object.
--
---
CREATE FOREIGN TABLE build_repository (
tenant TEXT NOT NULL,
canonical_name TEXT NOT NULL,
@@ -35,8 +70,15 @@ CREATE FOREIGN TABLE build_repository (
certificate_fingerprint TEXT NULL)
SERVER package_server OPTIONS (table_name 'repository');
--- The foreign table for build_package object.
+-- The foreign table for build_public_key object.
--
+CREATE FOREIGN TABLE build_public_key (
+ tenant TEXT NOT NULL,
+ fingerprint TEXT NOT NULL,
+ "data" TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'public_key');
+
+-- The foreign table for build_package object.
--
CREATE FOREIGN TABLE build_package (
tenant TEXT NOT NULL,
@@ -47,14 +89,100 @@ CREATE FOREIGN TABLE build_package (
version_revision INTEGER NOT NULL,
version_upstream TEXT NOT NULL,
version_release TEXT NULL,
+ project CITEXT NOT NULL,
+ build_email TEXT NULL,
+ build_email_comment TEXT NULL,
+ build_warning_email TEXT NULL,
+ build_warning_email_comment TEXT NULL,
+ build_error_email TEXT NULL,
+ build_error_email_comment TEXT NULL,
internal_repository_tenant TEXT NULL,
internal_repository_canonical_name TEXT NULL,
- buildable BOOLEAN NOT NULL)
+ buildable BOOLEAN NOT NULL,
+ custom_bot BOOLEAN NULL)
SERVER package_server OPTIONS (table_name 'package');
--- The foreign table for the build_package object builds member (that is of a
+-- The foreign tables for the build_package object requirements member (that
+-- is of a 3-dimensional container type).
+--
+CREATE FOREIGN TABLE build_package_requirements (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ index BIGINT NOT NULL,
+ buildtime BOOLEAN NOT NULL,
+ comment TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_requirements');
+
+CREATE FOREIGN TABLE build_package_requirement_alternatives (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ requirement_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ enable TEXT NULL,
+ reflect TEXT NULL)
+SERVER package_server OPTIONS (table_name 'package_requirement_alternatives');
+
+CREATE FOREIGN TABLE build_package_requirement_alternative_requirements (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ requirement_index BIGINT NOT NULL,
+ alternative_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ id TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_requirement_alternative_requirements');
+
+-- The foreign table for the build_package object tests member (that is of a
-- container type).
--
+CREATE FOREIGN TABLE build_package_tests (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ index BIGINT NOT NULL,
+ test_name CITEXT NOT NULL,
+ test_min_version_epoch INTEGER NULL,
+ test_min_version_canonical_upstream TEXT NULL,
+ test_min_version_canonical_release TEXT NULL,
+ test_min_version_revision INTEGER NULL,
+ test_min_version_upstream TEXT NULL,
+ test_min_version_release TEXT NULL,
+ test_max_version_epoch INTEGER NULL,
+ test_max_version_canonical_upstream TEXT NULL,
+ test_max_version_canonical_release TEXT NULL,
+ test_max_version_revision INTEGER NULL,
+ test_max_version_upstream TEXT NULL,
+ test_max_version_release TEXT NULL,
+ test_min_open BOOLEAN NULL,
+ test_max_open BOOLEAN NULL,
+ test_package_tenant TEXT NULL,
+ test_package_name CITEXT NULL,
+ test_package_version_epoch INTEGER NULL,
+ test_package_version_canonical_upstream TEXT NULL,
+ test_package_version_canonical_release TEXT NULL COLLATE "C",
+ test_package_version_revision INTEGER NULL,
+ test_type TEXT NOT NULL,
+ test_buildtime BOOLEAN NOT NULL,
+ test_enable TEXT NULL,
+ test_reflect TEXT NULL)
+SERVER package_server OPTIONS (table_name 'package_tests');
+
+-- The foreign table for the build_package object builds member (that is of a
+-- container type).
--
CREATE FOREIGN TABLE build_package_builds (
tenant TEXT NOT NULL,
@@ -71,7 +199,6 @@ SERVER package_server OPTIONS (table_name 'package_builds');
-- The foreign table for the build_package object constraints member (that is
-- of a container type).
--
---
CREATE FOREIGN TABLE build_package_constraints (
tenant TEXT NOT NULL,
name CITEXT NOT NULL,
@@ -85,3 +212,111 @@ CREATE FOREIGN TABLE build_package_constraints (
target TEXT NULL,
comment TEXT NOT NULL)
SERVER package_server OPTIONS (table_name 'package_build_constraints');
+
+-- The foreign table for the build_package object auxiliaries member (that is
+-- of a container type).
+--
+CREATE FOREIGN TABLE build_package_auxiliaries (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ index BIGINT NOT NULL,
+ environment_name TEXT NOT NULL,
+ config TEXT NOT NULL,
+ comment TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_auxiliaries');
+
+-- The foreign table for the build_package object bot_keys member (that is
+-- of a container type).
+--
+CREATE FOREIGN TABLE build_package_bot_keys (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ index BIGINT NOT NULL,
+ key_tenant TEXT NOT NULL,
+ key_fingerprint TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_bot_keys');
+
+-- The foreign tables for the build_package object configs member (that is a
+-- container of values containing containers.
+--
+CREATE FOREIGN TABLE build_package_configs (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ index BIGINT NOT NULL,
+ config_name TEXT NOT NULL,
+ config_arguments TEXT NULL,
+ config_comment TEXT NOT NULL,
+ config_email TEXT NULL,
+ config_email_comment TEXT NULL,
+ config_warning_email TEXT NULL,
+ config_warning_email_comment TEXT NULL,
+ config_error_email TEXT NULL,
+ config_error_email_comment TEXT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_configs');
+
+CREATE FOREIGN TABLE build_package_config_builds (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ config_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ expression TEXT NOT NULL,
+ comment TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_config_builds');
+
+CREATE FOREIGN TABLE build_package_config_constraints (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ config_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ exclusion BOOLEAN NOT NULL,
+ config TEXT NOT NULL,
+ target TEXT NULL,
+ comment TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_config_constraints');
+
+CREATE FOREIGN TABLE build_package_config_auxiliaries (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ config_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ environment_name TEXT NOT NULL,
+ config TEXT NOT NULL,
+ comment TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_config_auxiliaries');
+
+CREATE FOREIGN TABLE build_package_config_bot_keys (
+ tenant TEXT NOT NULL,
+ name CITEXT NOT NULL,
+ version_epoch INTEGER NOT NULL,
+ version_canonical_upstream TEXT NOT NULL,
+ version_canonical_release TEXT NOT NULL COLLATE "C",
+ version_revision INTEGER NOT NULL,
+ config_index BIGINT NOT NULL,
+ index BIGINT NOT NULL,
+ key_tenant TEXT NOT NULL,
+ key_fingerprint TEXT NOT NULL)
+SERVER package_server OPTIONS (table_name 'package_build_config_bot_keys');
diff --git a/libbrep/build-package.hxx b/libbrep/build-package.hxx
index c288f07..9a9c277 100644
--- a/libbrep/build-package.hxx
+++ b/libbrep/build-package.hxx
@@ -1,11 +1,12 @@
// file : libbrep/build-package.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_BUILD_PACKAGE_HXX
#define LIBBREP_BUILD_PACKAGE_HXX
#include <odb/core.hxx>
+#include <odb/section.hxx>
+#include <odb/nested-container.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
@@ -22,16 +23,23 @@ namespace brep
//
// The mapping is established in build-extra.sql. We also explicitly mark
// non-primary key foreign-mapped members in the source object.
- //
+
// Foreign object that is mapped to a subset of the tenant object.
//
- #pragma db object table("build_tenant") pointer(shared_ptr) readonly
+ // Note: table created manually thus assign table name explicitly.
+ //
+ #pragma db object table("build_tenant") pointer(shared_ptr)
class build_tenant
{
public:
string id;
+ bool private_;
+ optional<string> interactive;
bool archived;
+ optional<tenant_service> service;
+ optional<timestamp> queued_timestamp;
+ optional<build_toolchain> toolchain;
// Database mapping.
//
@@ -44,6 +52,8 @@ namespace brep
// Foreign object that is mapped to a subset of the repository object.
//
+ // Note: table created manually thus assign table name explicitly.
+ //
#pragma db object table("build_repository") pointer(shared_ptr) readonly
class build_repository
{
@@ -69,39 +79,268 @@ namespace brep
build_repository (): canonical_name (id.canonical_name) {}
};
+ // Foreign object that is mapped to a subset of the public key object.
+ //
+ // Note: table created manually thus assign table name explicitly.
+ //
+ #pragma db object table("build_public_key") pointer(shared_ptr) readonly
+ class build_public_key: public string
+ {
+ public:
+ public_key_id id;
+
+ // Database mapping.
+ //
+ #pragma db member(id) id column("")
+
+ #pragma db member(data) virtual(string) access(this)
+
+ private:
+ friend class odb::access;
+ build_public_key () = default;
+ };
+
+ // build_package_config
+ //
+ using build_package_config =
+ build_package_config_template<lazy_shared_ptr<build_public_key>>;
+
+ using build_package_configs =
+ build_package_configs_template<lazy_shared_ptr<build_public_key>>;
+
+ #pragma db value(build_package_config) definition
+
+ #pragma db member(build_package_config::builds) transient
+ #pragma db member(build_package_config::constraints) transient
+ #pragma db member(build_package_config::auxiliaries) transient
+ #pragma db member(build_package_config::bot_keys) transient
+
+ // build_package_bot_keys
+ //
+ using build_package_bot_keys = vector<lazy_shared_ptr<build_public_key>>;
+ using build_package_bot_key_key = odb::nested_key<build_package_bot_keys>;
+
+ using build_package_bot_keys_map =
+ std::map<build_package_bot_key_key, lazy_shared_ptr<build_public_key>>;
+
+ #pragma db value(build_package_bot_key_key)
+ #pragma db member(build_package_bot_key_key::outer) column("config_index")
+ #pragma db member(build_package_bot_key_key::inner) column("index")
+
+ // Forward declarations.
+ //
+ class build_package;
+
+ // Build package dependency.
+ //
+ #pragma db value
+ struct build_dependency
+ {
+ package_name name;
+ optional<version_constraint> constraint;
+
+ lazy_shared_ptr<build_package> package;
+
+ // Database mapping.
+ //
+ #pragma db member(constraint) column("")
+ };
+
+ // Build package external test dependency.
+ //
+ #pragma db value
+ struct build_test_dependency: build_dependency
+ {
+ test_dependency_type type;
+ bool buildtime;
+ optional<string> enable;
+ optional<string> reflect;
+ };
+
// Foreign object that is mapped to a subset of the package object.
//
+ // Note: table created manually thus assign table name explicitly.
+ //
#pragma db object table("build_package") pointer(shared_ptr) readonly session
class build_package
{
public:
+ using requirements_type = brep::requirements;
+
package_id id;
upstream_version version;
+
+ package_name project;
+
+ optional<email> build_email;
+ optional<email> build_warning_email;
+ optional<email> build_error_email;
+
+ // Mapped to the package object requirements and tests members using the
+ // PostgreSQL foreign table mechanism.
+ //
+ requirements_type requirements;
+ small_vector<build_test_dependency, 1> tests;
+
+ odb::section requirements_tests_section;
+
lazy_shared_ptr<build_repository> internal_repository;
bool buildable;
+ optional<bool> custom_bot;
- // Mapped to the package object builds member using the PostgreSQL foreign
- // table mechanism.
+ // Mapped to the package object builds, build_constraints,
+ // build_auxiliaries, bot_keys, and build_configs members using the
+ // PostgreSQL foreign table mechanism.
//
- build_class_exprs builds;
+ build_class_exprs builds;
+ build_constraints constraints;
+ build_auxiliaries auxiliaries;
+ build_package_bot_keys bot_keys;
+ build_package_configs configs;
- // Mapped to the package object build_constraints member using the
- // PostgreSQL foreign table mechanism.
+ // Group the builds/constraints, auxiliaries, and bot_keys members of this
+ // object together with their respective nested configs entries into the
+ // separate sections for an explicit load. Note that the configs top-level
+ // members are loaded implicitly.
//
- build_constraints constraints;
+ odb::section constraints_section;
+ odb::section auxiliaries_section;
+ odb::section bot_keys_section;
+
+ bool
+ internal () const noexcept {return internal_repository != nullptr;}
// Database mapping.
//
#pragma db member(id) id column("")
#pragma db member(version) set(this.version.init (this.id.version, (?)))
- #pragma db member(builds) id_column("") value_column("")
- #pragma db member(constraints) id_column("") value_column("")
+
+ // requirements
+ //
+ // Note that this is a 2-level nested container (see package.hxx for
+ // details).
+ //
+
+ // Container of the requirement_alternatives values.
+ //
+ #pragma db member(requirements) id_column("") value_column("") \
+ section(requirements_tests_section)
+
+ // Container of the requirement_alternative values.
+ //
+ #pragma db member(requirement_alternatives) \
+ virtual(requirement_alternatives_map) \
+ after(requirements) \
+ get(odb::nested_get (this.requirements)) \
+ set(odb::nested_set (this.requirements, std::move (?))) \
+ id_column("") key_column("") value_column("") \
+ section(requirements_tests_section)
+
+ // Container of the requirement (string) values.
+ //
+ #pragma db member(requirement_alternative_requirements) \
+ virtual(requirement_alternative_requirements_map) \
+ after(requirement_alternatives) \
+ get(odb::nested2_get (this.requirements)) \
+ set(odb::nested2_set (this.requirements, std::move (?))) \
+ id_column("") key_column("") value_column("id") \
+ section(requirements_tests_section)
+
+ // tests
+ //
+ #pragma db member(tests) id_column("") value_column("test_") \
+ section(requirements_tests_section)
+
+ #pragma db member(requirements_tests_section) load(lazy) update(always)
+
+ // builds, constraints, auxiliaries, and bot_keys
+ //
+ #pragma db member(builds) id_column("") value_column("") \
+ section(constraints_section)
+
+ #pragma db member(constraints) id_column("") value_column("") \
+ section(constraints_section)
+
+ #pragma db member(auxiliaries) id_column("") value_column("") \
+ section(auxiliaries_section)
+
+ #pragma db member(bot_keys) id_column("") value_column("key_") \
+ section(bot_keys_section)
+
+ // configs
+ //
+ // Note that build_package_config::{builds,constraints,auxiliaries,bot_keys}
+ // are persisted/loaded via the separate nested containers (see
+ // commons.hxx for details).
+ //
+ #pragma db member(configs) id_column("") value_column("config_")
+
+ #pragma db member(config_builds) \
+ virtual(build_class_exprs_map) \
+ after(configs) \
+ get(odb::nested_get ( \
+ brep::build_package_config_builds (this.configs))) \
+ set(brep::build_package_config_builds bs; \
+ odb::nested_set (bs, std::move (?)); \
+ move (bs).to_configs (this.configs)) \
+ id_column("") key_column("") value_column("") \
+ section(constraints_section)
+
+ #pragma db member(config_constraints) \
+ virtual(build_constraints_map) \
+ after(config_builds) \
+ get(odb::nested_get ( \
+ brep::build_package_config_constraints (this.configs))) \
+ set(brep::build_package_config_constraints cs; \
+ odb::nested_set (cs, std::move (?)); \
+ move (cs).to_configs (this.configs)) \
+ id_column("") key_column("") value_column("") \
+ section(constraints_section)
+
+ #pragma db member(config_auxiliaries) \
+ virtual(build_auxiliaries_map) \
+ after(config_constraints) \
+ get(odb::nested_get ( \
+ brep::build_package_config_auxiliaries (this.configs))) \
+ set(brep::build_package_config_auxiliaries as; \
+ odb::nested_set (as, std::move (?)); \
+ move (as).to_configs (this.configs)) \
+ id_column("") key_column("") value_column("") \
+ section(auxiliaries_section)
+
+ #pragma db member(config_bot_keys) \
+ virtual(build_package_bot_keys_map) \
+ after(config_auxiliaries) \
+ get(odb::nested_get ( \
+ brep::build_package_config_bot_keys< \
+ lazy_shared_ptr<brep::build_public_key>> (this.configs))) \
+ set(brep::build_package_config_bot_keys< \
+ lazy_shared_ptr<brep::build_public_key>> bks; \
+ odb::nested_set (bks, std::move (?)); \
+ move (bks).to_configs (this.configs)) \
+ id_column("") key_column("") value_column("key_") \
+ section(bot_keys_section)
+
+ #pragma db member(constraints_section) load(lazy) update(always)
+ #pragma db member(auxiliaries_section) load(lazy) update(always)
+ #pragma db member(bot_keys_section) load(lazy) update(always)
private:
friend class odb::access;
build_package () = default;
};
+ #pragma db view object(build_package)
+ struct build_package_version
+ {
+ package_id id;
+ upstream_version version;
+
+ // Database mapping.
+ //
+ #pragma db member(version) set(this.version.init (this.id.version, (?)))
+ };
+
// Packages that can potentially be built.
//
// Note that ADL can't find the equal operator, so we use the function call
@@ -116,12 +355,13 @@ namespace brep
object(build_tenant: build_package::id.tenant == build_tenant::id)
struct buildable_package
{
- package_id id;
- upstream_version version;
+ shared_ptr<build_package> package;
- // Database mapping.
+ bool archived; // True if the tenant the package belongs to is archived.
+
+ // Present if the tenant the package belongs to is interactive.
//
- #pragma db member(version) set(this.version.init (this.id.version, (?)))
+ optional<string> interactive;
};
#pragma db view \
diff --git a/libbrep/build.cxx b/libbrep/build.cxx
index 45ef678..13f0818 100644
--- a/libbrep/build.cxx
+++ b/libbrep/build.cxx
@@ -1,5 +1,4 @@
// file : libbrep/build.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <libbrep/build.hxx>
@@ -13,6 +12,7 @@ namespace brep
{
switch (s)
{
+ case build_state::queued: return "queued";
case build_state::building: return "building";
case build_state::built: return "built";
}
@@ -23,9 +23,10 @@ namespace brep
build_state
to_build_state (const string& s)
{
- if (s == "building") return build_state::building;
+ if (s == "queued") return build_state::queued;
+ else if (s == "building") return build_state::building;
else if (s == "built") return build_state::built;
- else throw invalid_argument ("invalid build state '" + s + "'");
+ else throw invalid_argument ("invalid build state '" + s + '\'');
}
// force_state
@@ -49,7 +50,7 @@ namespace brep
if (s == "unforced") return force_state::unforced;
else if (s == "forcing") return force_state::forcing;
else if (s == "forced") return force_state::forced;
- else throw invalid_argument ("invalid force state '" + s + "'");
+ else throw invalid_argument ("invalid force state '" + s + '\'');
}
// build
@@ -58,27 +59,194 @@ namespace brep
build (string tnt,
package_name_type pnm,
version pvr,
- string cfg,
+ target_triplet trg,
+ string tcf,
+ string pcf,
string tnm, version tvr,
+ optional<string> inr,
optional<string> afp, optional<string> ach,
- string mnm, string msm,
- butl::target_triplet trg)
+ build_machine mcn,
+ vector<build_machine> ams,
+ string ccs,
+ string mcs)
: id (package_id (move (tnt), move (pnm), pvr),
- move (cfg),
+ move (trg),
+ move (tcf),
+ move (pcf),
move (tnm), tvr),
tenant (id.package.tenant),
package_name (id.package.name),
package_version (move (pvr)),
- configuration (id.configuration),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
toolchain_name (id.toolchain_name),
toolchain_version (move (tvr)),
state (build_state::building),
+ interactive (move (inr)),
timestamp (timestamp_type::clock::now ()),
force (force_state::unforced),
agent_fingerprint (move (afp)), agent_challenge (move (ach)),
- machine (move (mnm)),
- machine_summary (move (msm)),
- target (move (trg))
+ machine (move (mcn)),
+ auxiliary_machines (move (ams)),
+ controller_checksum (move (ccs)),
+ machine_checksum (move (mcs))
+ {
+ }
+
+ build::
+ build (string tnt,
+ package_name_type pnm,
+ version pvr,
+ target_triplet trg,
+ string tcf,
+ string pcf,
+ string tnm, version tvr)
+ : id (package_id (move (tnt), move (pnm), pvr),
+ move (trg),
+ move (tcf),
+ move (pcf),
+ move (tnm), tvr),
+ tenant (id.package.tenant),
+ package_name (id.package.name),
+ package_version (move (pvr)),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name),
+ toolchain_version (move (tvr)),
+ state (build_state::queued),
+ timestamp (timestamp_type::clock::now ()),
+ force (force_state::unforced)
+ {
+ }
+
+ build::
+ build (string tnt,
+ package_name_type pnm,
+ version pvr,
+ target_triplet trg,
+ string tcf,
+ string pcf,
+ string tnm, version tvr,
+ result_status rst,
+ operation_results ors,
+ build_machine mcn,
+ vector<build_machine> ams)
+ : id (package_id (move (tnt), move (pnm), pvr),
+ move (trg),
+ move (tcf),
+ move (pcf),
+ move (tnm), tvr),
+ tenant (id.package.tenant),
+ package_name (id.package.name),
+ package_version (move (pvr)),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name),
+ toolchain_version (move (tvr)),
+ state (build_state::built),
+ timestamp (timestamp_type::clock::now ()),
+ force (force_state::unforced),
+ status (rst),
+ soft_timestamp (timestamp),
+ hard_timestamp (timestamp),
+ machine (move (mcn)),
+ auxiliary_machines (move (ams)),
+ results (move (ors))
+ {
+ }
+
+ build::
+ build (build&& b)
+ : id (move (b.id)),
+ tenant (id.package.tenant),
+ package_name (id.package.name),
+ package_version (move (b.package_version)),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name),
+ toolchain_version (move (b.toolchain_version)),
+ state (b.state),
+ interactive (move (b.interactive)),
+ timestamp (b.timestamp),
+ force (b.force),
+ status (b.status),
+ soft_timestamp (b.soft_timestamp),
+ hard_timestamp (b.hard_timestamp),
+ agent_fingerprint (move (b.agent_fingerprint)),
+ agent_challenge (move (b.agent_challenge)),
+ machine (move (b.machine)),
+ auxiliary_machines (move (b.auxiliary_machines)),
+ auxiliary_machines_section (move (b.auxiliary_machines_section)),
+ results (move (b.results)),
+ results_section (move (b.results_section)),
+ controller_checksum (move (b.controller_checksum)),
+ machine_checksum (move (b.machine_checksum)),
+ agent_checksum (move (b.agent_checksum)),
+ worker_checksum (move (b.worker_checksum)),
+ dependency_checksum (move (b.dependency_checksum))
+ {
+ }
+
+ build& build::
+ operator= (build&& b)
+ {
+ if (this != &b)
+ {
+ id = move (b.id);
+ package_version = move (b.package_version);
+ toolchain_version = move (b.toolchain_version);
+ state = b.state;
+ interactive = move (b.interactive);
+ timestamp = b.timestamp;
+ force = b.force;
+ status = b.status;
+ soft_timestamp = b.soft_timestamp;
+ hard_timestamp = b.hard_timestamp;
+ agent_fingerprint = move (b.agent_fingerprint);
+ agent_challenge = move (b.agent_challenge);
+ machine = move (b.machine);
+ auxiliary_machines = move (b.auxiliary_machines);
+ auxiliary_machines_section = move (b.auxiliary_machines_section);
+ results = move (b.results);
+ results_section = move (b.results_section);
+ controller_checksum = move (b.controller_checksum);
+ machine_checksum = move (b.machine_checksum);
+ agent_checksum = move (b.agent_checksum);
+ worker_checksum = move (b.worker_checksum);
+ dependency_checksum = move (b.dependency_checksum);
+ }
+
+ return *this;
+ }
+
+ // build_delay
+ //
+ build_delay::
+ build_delay (string tnt,
+ package_name_type pnm, version pvr,
+ target_triplet trg,
+ string tcf,
+ string pcf,
+ string tnm, version tvr,
+ timestamp ptm)
+ : id (package_id (move (tnt), move (pnm), pvr),
+ move (trg),
+ move (tcf),
+ move (pcf),
+ move (tnm), tvr),
+ tenant (id.package.tenant),
+ package_name (id.package.name),
+ package_version (move (pvr)),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name),
+ toolchain_version (move (tvr)),
+ package_timestamp (ptm)
{
}
}
diff --git a/libbrep/build.hxx b/libbrep/build.hxx
index fc9f675..af49c03 100644
--- a/libbrep/build.hxx
+++ b/libbrep/build.hxx
@@ -1,5 +1,4 @@
// file : libbrep/build.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_BUILD_HXX
@@ -10,28 +9,30 @@
#include <odb/core.hxx>
#include <odb/section.hxx>
-#include <libbutl/target-triplet.mxx>
-
-#include <libbbot/manifest.hxx>
-
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-// Must be included last (see assert in libbrep/common.hxx).
-//
#include <libbrep/common.hxx>
#include <libbrep/build-package.hxx>
+// Must be included after libbrep/common.hxx, so that the _version structure
+// get defined before libbpkg/manifest.hxx inclusion.
+//
+// Note that if we start using assert() in get/set expressions in this header,
+// we will have to redefine it for ODB compiler after all include directives
+// (see libbrep/common.hxx for details).
+//
+#include <libbbot/manifest.hxx>
+
// Used by the data migration entries.
//
-#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 9
+#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 20
-#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 9, closed)
+#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 27, closed)
-// We have to keep these mappings at the global scope instead of inside
-// the brep namespace because they need to be also effective in the
-// bbot namespace from which we "borrow" types (and some of them use the mapped
-// types).
+// We have to keep these mappings at the global scope instead of inside the
+// brep namespace because they need to be also effective in the bbot namespace
+// from which we "borrow" types (and some of them use the mapped types).
//
#pragma db map type(bbot::result_status) as(std::string) \
to(to_string (?)) \
@@ -43,14 +44,23 @@ namespace brep
struct build_id
{
package_id package;
- string configuration;
+ target_triplet target;
+ string target_config_name;
+ string package_config_name;
string toolchain_name;
canonical_version toolchain_version;
build_id () = default;
- build_id (package_id p, string c, string n, const brep::version& v)
+ build_id (package_id p,
+ target_triplet t,
+ string tc,
+ string pc,
+ string n,
+ const brep::version& v)
: package (move (p)),
- configuration (move (c)),
+ target (move (t)),
+ target_config_name (move (tc)),
+ package_config_name (move (pc)),
toolchain_name (move (n)),
toolchain_version (v) {}
};
@@ -61,7 +71,13 @@ namespace brep
if (x.package != y.package)
return x.package < y.package;
- if (int r = x.configuration.compare (y.configuration))
+ if (int r = x.target.compare (y.target))
+ return r < 0;
+
+ if (int r = x.target_config_name.compare (y.target_config_name))
+ return r < 0;
+
+ if (int r = x.package_config_name.compare (y.package_config_name))
return r < 0;
if (int r = x.toolchain_name.compare (y.toolchain_name))
@@ -70,7 +86,7 @@ namespace brep
return compare_version_lt (x.toolchain_version, y.toolchain_version, true);
}
- // These allow comparing objects that have package, configuration,
+ // These allow comparing objects that have package, configuration, target,
// toolchain_name, and toolchain_version data members to build_id values.
// The idea is that this works for both query members of build id types as
// well as for values of the build_id type.
@@ -78,35 +94,84 @@ namespace brep
template <typename T>
inline auto
operator== (const T& x, const build_id& y)
- -> decltype (x.package == y.package &&
- x.configuration == y.configuration &&
- x.toolchain_name == y.toolchain_name &&
+ -> decltype (x.package == y.package &&
+ x.target == y.target &&
+ x.target_config_name == y.target_config_name &&
+ x.package_config_name == y.package_config_name &&
+ x.toolchain_name == y.toolchain_name &&
x.toolchain_version.epoch == y.toolchain_version.epoch)
{
- return x.package == y.package &&
- x.configuration == y.configuration &&
- x.toolchain_name == y.toolchain_name &&
+ return x.package == y.package &&
+ x.target == y.target &&
+ x.target_config_name == y.target_config_name &&
+ x.package_config_name == y.package_config_name &&
+ x.toolchain_name == y.toolchain_name &&
compare_version_eq (x.toolchain_version, y.toolchain_version, true);
}
template <typename T>
inline auto
operator!= (const T& x, const build_id& y)
- -> decltype (x.package == y.package &&
- x.configuration == y.configuration &&
- x.toolchain_name == y.toolchain_name &&
+ -> decltype (x.package == y.package &&
+ x.target == y.target &&
+ x.target_config_name == y.target_config_name &&
+ x.package_config_name == y.package_config_name &&
+ x.toolchain_name == y.toolchain_name &&
x.toolchain_version.epoch == y.toolchain_version.epoch)
{
- return x.package != y.package ||
- x.configuration != y.configuration ||
- x.toolchain_name != y.toolchain_name ||
+ return x.package != y.package ||
+ x.target != y.target ||
+ x.target_config_name != y.target_config_name ||
+ x.package_config_name != y.package_config_name ||
+ x.toolchain_name != y.toolchain_name ||
compare_version_ne (x.toolchain_version, y.toolchain_version, true);
}
+ // Allow comparing the query members with the query parameters bound by
+ // reference to variables of the build id type (in particular in the
+ // prepared queries).
+ //
+ // Note that it is not operator==() since the query template parameter type
+ // can not be deduced from the function parameter types and needs to be
+ // specified explicitly.
+ //
+ template <typename T, typename ID>
+ inline auto
+ equal (const ID& x, const build_id& y, bool toolchain_version = true)
+ -> decltype (x.package.tenant == odb::query<T>::_ref (y.package.tenant) &&
+ x.package.name == odb::query<T>::_ref (y.package.name) &&
+ x.package.version.epoch ==
+ odb::query<T>::_ref (y.package.version.epoch) &&
+ x.target_config_name ==
+ odb::query<T>::_ref (y.target_config_name) &&
+ x.toolchain_name == odb::query<T>::_ref (y.toolchain_name) &&
+ x.toolchain_version.epoch ==
+ odb::query<T>::_ref (y.toolchain_version.epoch))
+ {
+ using query = odb::query<T>;
+
+ query r (equal<T> (x.package, y.package) &&
+ x.target == query::_ref (y.target) &&
+ x.target_config_name == query::_ref (y.target_config_name) &&
+ x.package_config_name == query::_ref (y.package_config_name) &&
+ x.toolchain_name == query::_ref (y.toolchain_name));
+
+ if (toolchain_version)
+ r = r && equal<T> (x.toolchain_version, y.toolchain_version);
+
+ return r;
+ }
+
// build_state
//
+ // The queued build state is semantically equivalent to a non-existent
+ // build. It is only used for those tenants which have a third-party
+ // service associated that requires the `queued` notifications (see
+ // mod/tenant-service.hxx for background).
+ //
enum class build_state: std::uint8_t
{
+ queued,
building,
built
};
@@ -158,12 +223,6 @@ namespace brep
? bbot::to_result_status (*(?)) \
: brep::optional_result_status ())
- // target_triplet
- //
- #pragma db map type(butl::target_triplet) as(string) \
- to((?).string ()) \
- from(butl::target_triplet (?))
-
// operation_results
//
using bbot::operation_result;
@@ -171,6 +230,13 @@ namespace brep
using bbot::operation_results;
+ #pragma db value
+ struct build_machine
+ {
+ string name;
+ string summary;
+ };
+
#pragma db object pointer(shared_ptr) session
class build
{
@@ -179,29 +245,72 @@ namespace brep
using package_name_type = brep::package_name;
// Create the build object with the building state, non-existent status,
- // the timestamp set to now and the force state set to unforced.
+ // the timestamp set to now, and the force state set to unforced.
//
build (string tenant,
- package_name_type,
- version,
- string configuration,
+ package_name_type, version,
+ target_triplet,
+ string target_config_name,
+ string package_config_name,
string toolchain_name, version toolchain_version,
+ optional<string> interactive,
optional<string> agent_fingerprint,
optional<string> agent_challenge,
- string machine, string machine_summary,
- butl::target_triplet);
+ build_machine,
+ vector<build_machine> auxiliary_machines,
+ string controller_checksum,
+ string machine_checksum);
+
+ // Create the build object with the queued state.
+ //
+ build (string tenant,
+ package_name_type, version,
+ target_triplet,
+ string target_config_name,
+ string package_config_name,
+ string toolchain_name, version toolchain_version);
+
+ // Create the build object with the built state, the specified status and
+ // operation results, all the timestamps set to now, and the force state
+ // set to unforced.
+ //
+ build (string tenant,
+ package_name_type, version,
+ target_triplet,
+ string target_config_name,
+ string package_config_name,
+ string toolchain_name, version toolchain_version,
+ result_status,
+ operation_results,
+ build_machine,
+ vector<build_machine> auxiliary_machines = {});
+
+ // Move-only type.
+ //
+ build (build&&);
+ build& operator= (build&&);
+
+ build (const build&) = delete;
+ build& operator= (const build&) = delete;
build_id id;
string& tenant; // Tracks id.package.tenant.
package_name_type& package_name; // Tracks id.package.name.
upstream_version package_version; // Original of id.package.version.
- string& configuration; // Tracks id.configuration.
+ target_triplet& target; // Tracks id.target.
+ string& target_config_name; // Tracks id.target_config_name.
+ string& package_config_name; // Tracks id.package_config_name.
string& toolchain_name; // Tracks id.toolchain_name.
upstream_version toolchain_version; // Original of id.toolchain_version.
build_state state;
+ // If present, the login information for the interactive build. May be
+ // present only in the building state.
+ //
+ optional<string> interactive;
+
// Time of the last state change (the creation time initially).
//
timestamp_type timestamp;
@@ -213,14 +322,35 @@ namespace brep
//
optional<result_status> status;
+ // Times of the last soft/hard completed (re)builds. Used to decide when
+ // to perform soft and hard rebuilds, respectively.
+ //
+ // The soft timestamp is updated whenever we receive a task result.
+ //
+ // The hard timestamp is updated whenever we receive a task result with
+ // a status other than skip.
+ //
+ // Also note that whenever hard_timestamp is updated, soft_timestamp is
+ // updated as well and whenever soft_timestamp is updated, timestamp is
+ // updated as well. Thus the following condition is always true:
+ //
+ // hard_timestamp <= soft_timestamp <= timestamp
+ //
+ // Note that the "completed" above means that we may analyze the task
+ // result/log and deem it as not completed and proceed with automatic
+ // rebuild (the flake monitor idea).
+ //
+ timestamp_type soft_timestamp;
+ timestamp_type hard_timestamp;
+
// May be present only for the building state.
//
optional<string> agent_fingerprint;
optional<string> agent_challenge;
- string machine;
- string machine_summary;
- butl::target_triplet target;
+ build_machine machine;
+ vector<build_machine> auxiliary_machines;
+ odb::section auxiliary_machines_section;
// Note that the logs are stored as std::string/TEXT which is Ok since
// they are UTF-8 and our database is UTF-8.
@@ -228,6 +358,21 @@ namespace brep
operation_results results;
odb::section results_section;
+ // Checksums of entities involved in the build.
+ //
+ // Optional checksums are provided by the external entities (agent and
+ // worker). All are absent initially.
+ //
+ // Note that the agent checksum can also be absent after the hard rebuild
+ // task is issued and the worker and dependency checksums - after a failed
+ // rebuild (error result status or worse).
+ //
+ string controller_checksum;
+ string machine_checksum;
+ optional<string> agent_checksum;
+ optional<string> worker_checksum;
+ optional<string> dependency_checksum;
+
// Database mapping.
//
#pragma db member(id) id column("")
@@ -236,7 +381,9 @@ namespace brep
#pragma db member(package_name) transient
#pragma db member(package_version) \
set(this.package_version.init (this.id.package.version, (?)))
- #pragma db member(configuration) transient
+ #pragma db member(target) transient
+ #pragma db member(target_config_name) transient
+ #pragma db member(package_config_name) transient
#pragma db member(toolchain_name) transient
#pragma db member(toolchain_version) \
set(this.toolchain_version.init (this.id.toolchain_version, (?)))
@@ -245,24 +392,34 @@ namespace brep
//
#pragma db member(timestamp) index
+ #pragma db member(machine) transient
+
+ #pragma db member(machine_name) virtual(std::string) \
+ access(machine.name) column("machine")
+
+ #pragma db member(machine_summary) virtual(std::string) \
+ access(machine.summary)
+
+ #pragma db member(auxiliary_machines) id_column("") value_column("") \
+ section(auxiliary_machines_section)
+
+ #pragma db member(auxiliary_machines_section) load(lazy) update(always)
+
#pragma db member(results) id_column("") value_column("") \
section(results_section)
#pragma db member(results_section) load(lazy) update(always)
- build (const build&) = delete;
- build& operator= (const build&) = delete;
-
private:
friend class odb::access;
build ()
: tenant (id.package.tenant),
package_name (id.package.name),
- configuration (id.configuration),
- toolchain_name (id.toolchain_name)
- {
- }
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name) {}
};
// Note that ADL can't find the equal operator in join conditions, so we use
@@ -312,7 +469,7 @@ namespace brep
canonical_version version_;
};
- // Build of an existing buildable package.
+ // Builds of existing buildable packages.
//
#pragma db view \
object(build) \
@@ -323,6 +480,7 @@ namespace brep
struct package_build
{
shared_ptr<brep::build> build;
+ bool archived; // True if the tenant the build belongs to is archived.
};
#pragma db view \
@@ -341,6 +499,93 @@ namespace brep
//
#pragma db member(result) column("count(" + build::id.package.name + ")")
};
+
+ // Ids of existing buildable package builds.
+ //
+ #pragma db view object(build) \
+ object(build_package inner: \
+ brep::operator== (build::id.package, build_package::id) && \
+ build_package::buildable)
+ struct package_build_id
+ {
+ build_id id;
+
+ operator build_id& () {return id;}
+ };
+
+ // Used to track the package build delays since the last build or, if not
+ // present, since the first opportunity to build the package.
+ //
+ #pragma db object pointer(shared_ptr) session
+ class build_delay
+ {
+ public:
+ using package_name_type = brep::package_name;
+
+ // If toolchain version is empty, then the object represents a minimum
+ // delay across all versions of the toolchain.
+ //
+ build_delay (string tenant,
+ package_name_type, version,
+ target_triplet,
+ string target_config_name,
+ string package_config_name,
+ string toolchain_name, version toolchain_version,
+ timestamp package_timestamp);
+
+ build_id id;
+
+ string& tenant; // Tracks id.package.tenant.
+ package_name_type& package_name; // Tracks id.package.name.
+ upstream_version package_version; // Original of id.package.version.
+ target_triplet& target; // Tracks id.target.
+ string& target_config_name; // Tracks id.target_config_name.
+ string& package_config_name; // Tracks id.package_config_name.
+ string& toolchain_name; // Tracks id.toolchain_name.
+ upstream_version toolchain_version; // Original of id.toolchain_version.
+
+ // Times of the latest soft and hard rebuild delay reports. Initialized
+ // with timestamp_nonexistent by default.
+ //
+ // Note that both reports notify about initial build delays (at their
+ // respective time intervals).
+ //
+ timestamp report_soft_timestamp;
+ timestamp report_hard_timestamp;
+
+ // Time when the package is initially considered as buildable for this
+ // configuration and toolchain. It is used to track the build delay if the
+ // build object is absent (the first build task is not yet issued, the
+ // build is removed by brep-clean, etc).
+ //
+ timestamp package_timestamp;
+
+ // Database mapping.
+ //
+ #pragma db member(id) id column("")
+
+ #pragma db member(tenant) transient
+ #pragma db member(package_name) transient
+ #pragma db member(package_version) \
+ set(this.package_version.init (this.id.package.version, (?)))
+ #pragma db member(target) transient
+ #pragma db member(target_config_name) transient
+ #pragma db member(package_config_name) transient
+ #pragma db member(toolchain_name) transient
+ #pragma db member(toolchain_version) \
+ set(this.toolchain_version.init (this.id.toolchain_version, (?)))
+
+ private:
+ friend class odb::access;
+
+ build_delay ()
+ : tenant (id.package.tenant),
+ package_name (id.package.name),
+ target (id.target),
+ target_config_name (id.target_config_name),
+ package_config_name (id.package_config_name),
+ toolchain_name (id.toolchain_name) {}
+ };
}
#endif // LIBBREP_BUILD_HXX
diff --git a/libbrep/build.xml b/libbrep/build.xml
index 3ade7c8..1eba85a 100644
--- a/libbrep/build.xml
+++ b/libbrep/build.xml
@@ -1,5 +1,90 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="build" version="1">
- <model version="9">
+ <changeset version="27"/>
+
+ <changeset version="26"/>
+
+ <changeset version="25">
+ <add-table name="build_auxiliary_machines" kind="container">
+ <column name="package_tenant" type="TEXT" null="false"/>
+ <column name="package_name" type="CITEXT" null="false"/>
+ <column name="package_version_epoch" type="INTEGER" null="false"/>
+ <column name="package_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="package_version_revision" type="INTEGER" null="false"/>
+ <column name="target" type="TEXT" null="false"/>
+ <column name="target_config_name" type="TEXT" null="false"/>
+ <column name="package_config_name" type="TEXT" null="false"/>
+ <column name="toolchain_name" type="TEXT" null="false"/>
+ <column name="toolchain_version_epoch" type="INTEGER" null="false"/>
+ <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="toolchain_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="toolchain_version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="name" type="TEXT" null="false"/>
+ <column name="summary" type="TEXT" null="false"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="package_tenant"/>
+ <column name="package_name"/>
+ <column name="package_version_epoch"/>
+ <column name="package_version_canonical_upstream"/>
+ <column name="package_version_canonical_release"/>
+ <column name="package_version_revision"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
+ <column name="toolchain_name"/>
+ <column name="toolchain_version_epoch"/>
+ <column name="toolchain_version_canonical_upstream"/>
+ <column name="toolchain_version_canonical_release"/>
+ <column name="toolchain_version_revision"/>
+ <references table="build">
+ <column name="package_tenant"/>
+ <column name="package_name"/>
+ <column name="package_version_epoch"/>
+ <column name="package_version_canonical_upstream"/>
+ <column name="package_version_canonical_release"/>
+ <column name="package_version_revision"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
+ <column name="toolchain_name"/>
+ <column name="toolchain_version_epoch"/>
+ <column name="toolchain_version_canonical_upstream"/>
+ <column name="toolchain_version_canonical_release"/>
+ <column name="toolchain_version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="build_auxiliary_machines_object_id_i">
+ <column name="package_tenant"/>
+ <column name="package_name"/>
+ <column name="package_version_epoch"/>
+ <column name="package_version_canonical_upstream"/>
+ <column name="package_version_canonical_release"/>
+ <column name="package_version_revision"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
+ <column name="toolchain_name"/>
+ <column name="toolchain_version_epoch"/>
+ <column name="toolchain_version_canonical_upstream"/>
+ <column name="toolchain_version_canonical_release"/>
+ <column name="toolchain_version_revision"/>
+ </index>
+ <index name="build_auxiliary_machines_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="24"/>
+
+ <changeset version="23"/>
+
+ <changeset version="22"/>
+
+ <changeset version="21"/>
+
+ <model version="20">
<table name="build" kind="object">
<column name="package_tenant" type="TEXT" null="false"/>
<column name="package_name" type="CITEXT" null="false"/>
@@ -7,7 +92,9 @@
<column name="package_version_canonical_upstream" type="TEXT" null="false"/>
<column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="package_version_revision" type="INTEGER" null="false"/>
- <column name="configuration" type="TEXT" null="false"/>
+ <column name="target" type="TEXT" null="false"/>
+ <column name="target_config_name" type="TEXT" null="false"/>
+ <column name="package_config_name" type="TEXT" null="false"/>
<column name="toolchain_name" type="TEXT" null="false"/>
<column name="toolchain_version_epoch" type="INTEGER" null="false"/>
<column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
@@ -18,14 +105,21 @@
<column name="toolchain_version_upstream" type="TEXT" null="false"/>
<column name="toolchain_version_release" type="TEXT" null="true"/>
<column name="state" type="TEXT" null="false"/>
+ <column name="interactive" type="TEXT" null="true"/>
<column name="timestamp" type="BIGINT" null="false"/>
<column name="force" type="TEXT" null="false"/>
<column name="status" type="TEXT" null="true"/>
+ <column name="soft_timestamp" type="BIGINT" null="false"/>
+ <column name="hard_timestamp" type="BIGINT" null="false"/>
<column name="agent_fingerprint" type="TEXT" null="true"/>
<column name="agent_challenge" type="TEXT" null="true"/>
<column name="machine" type="TEXT" null="false"/>
<column name="machine_summary" type="TEXT" null="false"/>
- <column name="target" type="TEXT" null="false"/>
+ <column name="controller_checksum" type="TEXT" null="false"/>
+ <column name="machine_checksum" type="TEXT" null="false"/>
+ <column name="agent_checksum" type="TEXT" null="true"/>
+ <column name="worker_checksum" type="TEXT" null="true"/>
+ <column name="dependency_checksum" type="TEXT" null="true"/>
<primary-key>
<column name="package_tenant"/>
<column name="package_name"/>
@@ -33,7 +127,9 @@
<column name="package_version_canonical_upstream"/>
<column name="package_version_canonical_release"/>
<column name="package_version_revision"/>
- <column name="configuration"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
<column name="toolchain_name"/>
<column name="toolchain_version_epoch"/>
<column name="toolchain_version_canonical_upstream"/>
@@ -51,7 +147,9 @@
<column name="package_version_canonical_upstream" type="TEXT" null="false"/>
<column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="package_version_revision" type="INTEGER" null="false"/>
- <column name="configuration" type="TEXT" null="false"/>
+ <column name="target" type="TEXT" null="false"/>
+ <column name="target_config_name" type="TEXT" null="false"/>
+ <column name="package_config_name" type="TEXT" null="false"/>
<column name="toolchain_name" type="TEXT" null="false"/>
<column name="toolchain_version_epoch" type="INTEGER" null="false"/>
<column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
@@ -68,7 +166,9 @@
<column name="package_version_canonical_upstream"/>
<column name="package_version_canonical_release"/>
<column name="package_version_revision"/>
- <column name="configuration"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
<column name="toolchain_name"/>
<column name="toolchain_version_epoch"/>
<column name="toolchain_version_canonical_upstream"/>
@@ -81,7 +181,9 @@
<column name="package_version_canonical_upstream"/>
<column name="package_version_canonical_release"/>
<column name="package_version_revision"/>
- <column name="configuration"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
<column name="toolchain_name"/>
<column name="toolchain_version_epoch"/>
<column name="toolchain_version_canonical_upstream"/>
@@ -96,7 +198,9 @@
<column name="package_version_canonical_upstream"/>
<column name="package_version_canonical_release"/>
<column name="package_version_revision"/>
- <column name="configuration"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
<column name="toolchain_name"/>
<column name="toolchain_version_epoch"/>
<column name="toolchain_version_canonical_upstream"/>
@@ -107,5 +211,44 @@
<column name="index"/>
</index>
</table>
+ <table name="build_delay" kind="object">
+ <column name="package_tenant" type="TEXT" null="false"/>
+ <column name="package_name" type="CITEXT" null="false"/>
+ <column name="package_version_epoch" type="INTEGER" null="false"/>
+ <column name="package_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="package_version_revision" type="INTEGER" null="false"/>
+ <column name="target" type="TEXT" null="false"/>
+ <column name="target_config_name" type="TEXT" null="false"/>
+ <column name="package_config_name" type="TEXT" null="false"/>
+ <column name="toolchain_name" type="TEXT" null="false"/>
+ <column name="toolchain_version_epoch" type="INTEGER" null="false"/>
+ <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="toolchain_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="toolchain_version_revision" type="INTEGER" null="false"/>
+ <column name="package_version_upstream" type="TEXT" null="false"/>
+ <column name="package_version_release" type="TEXT" null="true"/>
+ <column name="toolchain_version_upstream" type="TEXT" null="false"/>
+ <column name="toolchain_version_release" type="TEXT" null="true"/>
+ <column name="report_soft_timestamp" type="BIGINT" null="false"/>
+ <column name="report_hard_timestamp" type="BIGINT" null="false"/>
+ <column name="package_timestamp" type="BIGINT" null="false"/>
+ <primary-key>
+ <column name="package_tenant"/>
+ <column name="package_name"/>
+ <column name="package_version_epoch"/>
+ <column name="package_version_canonical_upstream"/>
+ <column name="package_version_canonical_release"/>
+ <column name="package_version_revision"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
+ <column name="toolchain_name"/>
+ <column name="toolchain_version_epoch"/>
+ <column name="toolchain_version_canonical_upstream"/>
+ <column name="toolchain_version_canonical_release"/>
+ <column name="toolchain_version_revision"/>
+ </primary-key>
+ </table>
</model>
</changelog>
diff --git a/libbrep/buildfile b/libbrep/buildfile
index e649351..9a35a28 100644
--- a/libbrep/buildfile
+++ b/libbrep/buildfile
@@ -1,5 +1,4 @@
# file : libbrep/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
define sql: file
diff --git a/libbrep/common-traits.hxx b/libbrep/common-traits.hxx
index 99002a2..141a738 100644
--- a/libbrep/common-traits.hxx
+++ b/libbrep/common-traits.hxx
@@ -1,5 +1,4 @@
// file : libbrep/common-traits.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_COMMON_TRAITS_HXX
@@ -11,12 +10,50 @@
#include <odb/pgsql/traits.hxx>
+#include <libbutl/target-triplet.hxx>
+
#include <libbpkg/package-name.hxx>
namespace odb
{
namespace pgsql
{
+ // target_triplet
+ //
+ template <>
+ class value_traits<butl::target_triplet, id_string>:
+ value_traits<std::string, id_string>
+ {
+ public:
+ using value_type = butl::target_triplet;
+ using query_type = butl::target_triplet;
+ using image_type = details::buffer;
+
+ using base_type = value_traits<std::string, id_string>;
+
+ static void
+ set_value (value_type& v,
+ const details::buffer& b,
+ std::size_t n,
+ bool is_null)
+ {
+ std::string s;
+ base_type::set_value (s, b, n, is_null);
+ v = !s.empty () ? value_type (s) : value_type ();
+ }
+
+ static void
+ set_image (details::buffer& b,
+ std::size_t& n,
+ bool& is_null,
+ const value_type& v)
+ {
+ base_type::set_image (b, n, is_null, v.string ());
+ }
+ };
+
+ // package_name
+ //
template <>
class value_traits<bpkg::package_name, id_string>:
value_traits<std::string, id_string>
diff --git a/libbrep/common.cxx b/libbrep/common.cxx
index b2429a6..4f729a3 100644
--- a/libbrep/common.cxx
+++ b/libbrep/common.cxx
@@ -1,5 +1,4 @@
// file : libbrep/common.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <libbrep/common.hxx>
@@ -7,4 +6,30 @@
namespace brep
{
const version wildcard_version (0, "0", nullopt, nullopt, 0);
+
+ // unbuildable_reason
+ //
+ string
+ to_string (unbuildable_reason r)
+ {
+ switch (r)
+ {
+ case unbuildable_reason::stub: return "stub";
+ case unbuildable_reason::test: return "test";
+ case unbuildable_reason::external: return "external";
+ case unbuildable_reason::unbuildable: return "unbuildable";
+ }
+
+ return string (); // Should never be reached.
+ }
+
+ unbuildable_reason
+ to_unbuildable_reason (const string& r)
+ {
+ if (r == "stub") return unbuildable_reason::stub;
+ else if (r == "test") return unbuildable_reason::test;
+ else if (r == "external") return unbuildable_reason::external;
+ else if (r == "unbuildable") return unbuildable_reason::unbuildable;
+ else throw invalid_argument ("invalid unbuildable reason '" + r + '\'');
+ }
}
diff --git a/libbrep/common.hxx b/libbrep/common.hxx
index 6dc4870..1433c8c 100644
--- a/libbrep/common.hxx
+++ b/libbrep/common.hxx
@@ -1,14 +1,19 @@
// file : libbrep/common.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_COMMON_HXX
#define LIBBREP_COMMON_HXX
+#include <map>
#include <ratio>
#include <chrono>
#include <type_traits> // static_assert
+#include <odb/query.hxx>
+#include <odb/nested-container.hxx>
+
+#include <libbutl/target-triplet.hxx>
+
#include <libbpkg/package-name.hxx>
#include <libbrep/types.hxx>
@@ -112,7 +117,7 @@ namespace brep
std::chrono::nanoseconds::period>::value,
"The following timestamp ODB mapping is invalid");
- // As it pointed out in libbutl/timestamp.mxx we will overflow in year 2262,
+ // As pointed out in libbutl/timestamp.hxx we will overflow in year 2262,
// but by that time some larger basic type will be available for mapping.
//
#pragma db map type(timestamp) as(uint64_t) \
@@ -122,6 +127,20 @@ namespace brep
std::chrono::duration_cast<brep::timestamp::duration> ( \
std::chrono::nanoseconds (?))))
+ using optional_timestamp = optional<timestamp>;
+ using optional_uint64 = optional<uint64_t>;
+
+ #pragma db map type(optional_timestamp) as(brep::optional_uint64) \
+ to((?) \
+ ? std::chrono::duration_cast<std::chrono::nanoseconds> ( \
+ (?)->time_since_epoch ()).count () \
+ : brep::optional_uint64 ()) \
+ from((?) \
+ ? brep::timestamp ( \
+ std::chrono::duration_cast<brep::timestamp::duration> ( \
+ std::chrono::nanoseconds (*(?)))) \
+ : brep::optional_timestamp ())
+
// version
//
using bpkg::version;
@@ -227,6 +246,12 @@ namespace brep
//
extern const version wildcard_version;
+ // target_triplet
+ //
+ using butl::target_triplet;
+
+ #pragma db value(target_triplet) type("TEXT")
+
// package_name
//
using bpkg::package_name;
@@ -301,6 +326,19 @@ namespace brep
: tenant (move (t)), canonical_name (move (n)) {}
};
+ // public_key_id
+ //
+ #pragma db value
+ struct public_key_id
+ {
+ string tenant;
+ string fingerprint;
+
+ public_key_id () = default;
+ public_key_id (string t, string f)
+ : tenant (move (t)), fingerprint (move (f)) {}
+ };
+
// build_class_expr
//
using bpkg::build_class_expr;
@@ -322,15 +360,326 @@ namespace brep
#pragma db value(build_constraint) definition
+ // build_auxiliaries
+ //
+ using bpkg::build_auxiliary;
+ using build_auxiliaries = vector<build_auxiliary>;
+
+ #pragma db value(build_auxiliary) definition
+
+ // build_toolchain
+ //
+ #pragma db value
+ struct build_toolchain
+ {
+ string name;
+ brep::version version;
+ };
+
+ // email
+ //
+ using bpkg::email;
+
+ #pragma db value(email) definition
+ #pragma db member(email::value) virtual(string) before access(this) column("")
+
+ // build_package_config_template
+ //
+ using bpkg::build_package_config_template;
+
+ // 1 for the default configuration which is always present.
+ //
+ template <typename K>
+ using build_package_configs_template =
+ small_vector<build_package_config_template<K>, 1>;
+
+ // Return the address of the configuration object with the specified name,
+ // if present, and NULL otherwise.
+ //
+ template <typename K>
+ inline build_package_config_template<K>*
+ find (const string& name, build_package_configs_template<K>& cs)
+ {
+ auto i (find_if (cs.begin (), cs.end (),
+ [&name] (const build_package_config_template<K>& c)
+ {return c.name == name;}));
+
+ return i != cs.end () ? &*i : nullptr;
+ }
+
+ // Note that ODB doesn't support containers of value types which contain
+ // containers. Thus, we will persist/load
+ // build_package_config_template<K>::{builds,constraint,auxiliaries,bot_keys}
+ // via the separate nested containers using the adapter classes.
+ //
+
+ // build_package_config_template<K>::builds
+ //
+ using build_class_expr_key = odb::nested_key<build_class_exprs>;
+ using build_class_exprs_map = std::map<build_class_expr_key, build_class_expr>;
+
+ #pragma db value(build_class_expr_key)
+ #pragma db member(build_class_expr_key::outer) column("config_index")
+ #pragma db member(build_class_expr_key::inner) column("index")
+
+ // Adapter for build_package_config_template<K>::builds.
+ //
+ // Note: 1 as for build_package_configs_template.
+ //
+ class build_package_config_builds: public small_vector<build_class_exprs, 1>
+ {
+ public:
+ build_package_config_builds () = default;
+
+ template <typename K>
+ explicit
+ build_package_config_builds (const build_package_configs_template<K>& cs)
+ {
+ reserve (cs.size ());
+ for (const build_package_config_template<K>& c: cs)
+ push_back (c.builds);
+ }
+
+ template <typename K>
+ void
+ to_configs (build_package_configs_template<K>& cs) &&
+ {
+ // Note that the empty trailing entries will be missing (see ODB's
+ // nested-container.hxx for details).
+ //
+ assert (size () <= cs.size ());
+
+ auto i (cs.begin ());
+ for (build_class_exprs& ces: *this)
+ i++->builds = move (ces);
+ }
+ };
+
+ // build_package_config_template<K>::constraints
+ //
+ using build_constraint_key = odb::nested_key<build_constraints>;
+ using build_constraints_map = std::map<build_constraint_key, build_constraint>;
+
+ #pragma db value(build_constraint_key)
+ #pragma db member(build_constraint_key::outer) column("config_index")
+ #pragma db member(build_constraint_key::inner) column("index")
+
+ // Adapter for build_package_config_template<K>::constraints.
+ //
+ // Note: 1 as for build_package_configs_template.
+ //
+ class build_package_config_constraints:
+ public small_vector<build_constraints, 1>
+ {
+ public:
+ build_package_config_constraints () = default;
+
+ template <typename K>
+ explicit
+ build_package_config_constraints (
+ const build_package_configs_template<K>& cs)
+ {
+ reserve (cs.size ());
+ for (const build_package_config_template<K>& c: cs)
+ push_back (c.constraints);
+ }
+
+ template <typename K>
+ void
+ to_configs (build_package_configs_template<K>& cs) &&
+ {
+ // Note that the empty trailing entries will be missing (see ODB's
+ // nested-container.hxx for details).
+ //
+ assert (size () <= cs.size ());
+
+ auto i (cs.begin ());
+ for (build_constraints& bcs: *this)
+ i++->constraints = move (bcs);
+ }
+ };
+
+ // build_package_config_template<K>::auxiliaries
+ //
+ using build_auxiliary_key = odb::nested_key<build_auxiliaries>;
+ using build_auxiliaries_map = std::map<build_auxiliary_key, build_auxiliary>;
+
+ #pragma db value(build_auxiliary_key)
+ #pragma db member(build_auxiliary_key::outer) column("config_index")
+ #pragma db member(build_auxiliary_key::inner) column("index")
+
+ // Adapter for build_package_config_template<K>::auxiliaries.
+ //
+ // Note: 1 as for build_package_configs_template.
+ //
+ class build_package_config_auxiliaries:
+ public small_vector<build_auxiliaries, 1>
+ {
+ public:
+ build_package_config_auxiliaries () = default;
+
+ template <typename K>
+ explicit
+ build_package_config_auxiliaries (
+ const build_package_configs_template<K>& cs)
+ {
+ reserve (cs.size ());
+ for (const build_package_config_template<K>& c: cs)
+ push_back (c.auxiliaries);
+ }
+
+ template <typename K>
+ void
+ to_configs (build_package_configs_template<K>& cs) &&
+ {
+ // Note that the empty trailing entries will be missing (see ODB's
+ // nested-container.hxx for details).
+ //
+ assert (size () <= cs.size ());
+
+ auto i (cs.begin ());
+ for (build_auxiliaries& bas: *this)
+ i++->auxiliaries = move (bas);
+ }
+ };
+
+ // build_package_config_template<K>::bot_keys
+ //
+ // Adapter for build_package_config_template<K>::bot_keys.
+ //
+ // Note: 1 as for build_package_configs_template.
+ //
+ template <typename K>
+ class build_package_config_bot_keys: public small_vector<vector<K>, 1>
+ {
+ public:
+ build_package_config_bot_keys () = default;
+
+ explicit
+ build_package_config_bot_keys (const build_package_configs_template<K>& cs)
+ {
+ this->reserve (cs.size ());
+ for (const build_package_config_template<K>& c: cs)
+ this->push_back (c.bot_keys);
+ }
+
+ void
+ to_configs (build_package_configs_template<K>& cs) &&
+ {
+ // Note that the empty trailing entries will be missing (see ODB's
+ // nested-container.hxx for details).
+ //
+ assert (this->size () <= cs.size ());
+
+ auto i (cs.begin ());
+ for (vector<K>& bks: *this)
+ i++->bot_keys = move (bks);
+ }
+ };
+
+ // The primary reason why a package is unbuildable by the build bot
+ // controller service.
+ //
+ enum class unbuildable_reason: std::uint8_t
+ {
+ stub, // A stub, otherwise
+ test, // A separate test (built as part of primary), otherwise
+ external, // From an external repository, otherwise
+ unbuildable // From an internal unbuildable repository.
+ };
+
+ string
+ to_string (unbuildable_reason);
+
+ unbuildable_reason
+ to_unbuildable_reason (const string&); // May throw invalid_argument.
+
+ inline ostream&
+ operator<< (ostream& os, unbuildable_reason r) {return os << to_string (r);}
+
+ using optional_unbuildable_reason = optional<unbuildable_reason>;
+
+ #pragma db map type(unbuildable_reason) as(string) \
+ to(to_string (?)) \
+ from(brep::to_unbuildable_reason (?))
+
+ #pragma db map type(optional_unbuildable_reason) as(brep::optional_string) \
+ to((?) ? to_string (*(?)) : brep::optional_string ()) \
+ from((?) \
+ ? brep::to_unbuildable_reason (*(?)) \
+ : brep::optional_unbuildable_reason ()) \
+
+ // version_constraint
+ //
+ using bpkg::version_constraint;
+
+ #pragma db value(version_constraint) definition
+
+ // test_dependency_type
+ //
+ using bpkg::test_dependency_type;
+ using bpkg::to_test_dependency_type;
+
+ #pragma db map type(test_dependency_type) as(string) \
+ to(to_string (?)) \
+ from(brep::to_test_dependency_type (?))
+
+ // requirements
+ //
+ // Note that this is a 2-level nested container (see package.hxx for
+ // details).
+ //
+ using bpkg::requirement_alternative;
+ using bpkg::requirement_alternatives;
+ using requirements = vector<requirement_alternatives>;
+
+ #pragma db value(requirement_alternative) definition
+ #pragma db value(requirement_alternatives) definition
+
+ using requirement_alternative_key =
+ odb::nested_key<requirement_alternatives>;
+
+ using requirement_alternatives_map =
+ std::map<requirement_alternative_key, requirement_alternative>;
+
+ #pragma db value(requirement_alternative_key)
+ #pragma db member(requirement_alternative_key::outer) column("requirement_index")
+ #pragma db member(requirement_alternative_key::inner) column("index")
+
+ using requirement_key = odb::nested2_key<requirement_alternatives>;
+
+ using requirement_alternative_requirements_map =
+ std::map<requirement_key, string>;
+
+ #pragma db value(requirement_key)
+ #pragma db member(requirement_key::outer) column("requirement_index")
+ #pragma db member(requirement_key::middle) column("alternative_index")
+ #pragma db member(requirement_key::inner) column("index")
+
+ // Third-party service state which may optionally be associated with a
+ // tenant (see also mod/tenant-service.hxx for background).
+ //
+ #pragma db value
+ struct tenant_service
+ {
+ string id;
+ string type;
+ optional<string> data;
+
+ tenant_service () = default;
+
+ tenant_service (string i, string t, optional<string> d = nullopt)
+ : id (move (i)), type (move (t)), data (move (d)) {}
+ };
+
// Version comparison operators.
//
- // They allow comparing objects that have epoch, canonical_upstream,
- // canonical_release, and revision data members. The idea is that this
- // works for both query members of types version and canonical_version.
- // Note, though, that the object revisions should be comparable (both
- // optional, numeric, etc), so to compare version to query member or
- // canonical_version you may need to explicitly convert the version object
- // to canonical_version.
+ // Compare objects that have epoch, canonical_upstream, canonical_release,
+ // and revision data members. The idea is that this works for both query
+ // members of types version and canonical_version. Note, though, that the
+ // object revisions should be comparable (both optional, numeric, etc), so
+ // to compare version to query member or canonical_version you may need to
+ // explicitly convert the version object to canonical_version.
//
template <typename T1, typename T2>
inline auto
@@ -482,10 +831,9 @@ namespace brep
return compare_version_lt (x.version, y.version, true);
}
- // They allow comparing objects that have tenant, name, and version data
- // members. The idea is that this works for both query members of package id
- // types (in particular in join conditions) as well as for values of
- // package_id type.
+ // Compare objects that have tenant, name, and version data members. The
+ // idea is that this works for both query members of package id types (in
+ // particular in join conditions) as well as for values of package_id type.
//
template <typename T1, typename T2>
inline auto
@@ -511,6 +859,49 @@ namespace brep
compare_version_ne (x.version, y.version, true);
}
+ // Allow comparing the query members with the query parameters bound by
+ // reference to variables of the canonical version type (in particular in
+ // the prepared queries).
+ //
+ // Note that it is not operator==() since the query template parameter type
+ // can not be deduced from the function parameter types and needs to be
+ // specified explicitly.
+ //
+ template <typename T, typename V>
+ inline auto
+ equal (const V& x, const canonical_version& y)
+ -> decltype (x.epoch == odb::query<T>::_ref (y.epoch))
+ {
+ using query = odb::query<T>;
+
+ return x.epoch == query::_ref (y.epoch) &&
+ x.canonical_upstream == query::_ref (y.canonical_upstream) &&
+ x.canonical_release == query::_ref (y.canonical_release) &&
+ x.revision == query::_ref (y.revision);
+ }
+
+ // Allow comparing the query members with the query parameters bound by
+ // reference to variables of the package id type (in particular in the
+ // prepared queries).
+ //
+ // Note that it is not operator==() since the query template parameter type
+ // can not be deduced from the function parameter types and needs to be
+ // specified explicitly.
+ //
+ template <typename T, typename ID>
+ inline auto
+ equal (const ID& x, const package_id& y)
+ -> decltype (x.tenant == odb::query<T>::_ref (y.tenant) &&
+ x.name == odb::query<T>::_ref (y.name) &&
+ x.version.epoch == odb::query<T>::_ref (y.version.epoch))
+ {
+ using query = odb::query<T>;
+
+ return x.tenant == query::_ref (y.tenant) &&
+ x.name == query::_ref (y.name) &&
+ equal<T> (x.version, y.version);
+ }
+
// Repository id comparison operators.
//
inline bool
@@ -522,10 +913,10 @@ namespace brep
return x.canonical_name.compare (y.canonical_name) < 0;
}
- // They allow comparing objects that have tenant and canonical_name data
- // members. The idea is that this works for both query members of repository
- // id types (in particular in join conditions) as well as for values of
- // repository_id type.
+ // Compare objects that have tenant and canonical_name data members. The
+ // idea is that this works for both query members of repository id types (in
+ // particular in join conditions) as well as for values of repository_id
+ // type.
//
template <typename T1, typename T2>
inline auto
@@ -542,6 +933,38 @@ namespace brep
{
return x.tenant != y.tenant || x.canonical_name != y.canonical_name;
}
+
+ // Public key id comparison operators.
+ //
+ inline bool
+ operator< (const public_key_id& x, const public_key_id& y)
+ {
+ if (int r = x.tenant.compare (y.tenant))
+ return r < 0;
+
+ return x.fingerprint.compare (y.fingerprint) < 0;
+ }
+
+ // Compare objects that have tenant and fingerprint data members. The idea
+ // is that this works for both query members of public key id types (in
+ // particular in join conditions) as well as for values of public_key_id
+ // type.
+ //
+ template <typename T1, typename T2>
+ inline auto
+ operator== (const T1& x, const T2& y)
+ -> decltype (x.tenant == y.tenant && x.fingerprint == y.fingerprint)
+ {
+ return x.tenant == y.tenant && x.fingerprint == y.fingerprint;
+ }
+
+ template <typename T1, typename T2>
+ inline auto
+ operator!= (const T1& x, const T2& y)
+ -> decltype (x.tenant == y.tenant && x.fingerprint == y.fingerprint)
+ {
+ return x.tenant != y.tenant || x.fingerprint != y.fingerprint;
+ }
}
#endif // LIBBREP_COMMON_HXX
diff --git a/libbrep/database-lock.cxx b/libbrep/database-lock.cxx
index b295472..1b7f730 100644
--- a/libbrep/database-lock.cxx
+++ b/libbrep/database-lock.cxx
@@ -1,5 +1,4 @@
// file : libbrep/database-lock.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <libbrep/database-lock.hxx>
diff --git a/libbrep/database-lock.hxx b/libbrep/database-lock.hxx
index 832010f..ab5441d 100644
--- a/libbrep/database-lock.hxx
+++ b/libbrep/database-lock.hxx
@@ -1,5 +1,4 @@
// file : libbrep/database-lock.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_DATABASE_LOCK_HXX
diff --git a/libbrep/odb.sh b/libbrep/odb.sh
index 9ee11fa..608ca41 100755
--- a/libbrep/odb.sh
+++ b/libbrep/odb.sh
@@ -53,7 +53,7 @@ $odb "${inc[@]}" -d pgsql --std c++14 --generate-query \
--hxx-prologue '#include <libbrep/common-traits.hxx>' \
-DLIBODB_BUILD2 -DLIBODB_PGSQL_BUILD2 \
--include-with-brackets --include-prefix libbrep \
- --guard-prefix LIBBREP \
+ --guard-prefix LIBBREP \
common.hxx
$odb "${inc[@]}" -d pgsql --std c++14 --generate-query --generate-schema \
@@ -74,7 +74,7 @@ $odb "${inc[@]}" -d pgsql --std c++14 --generate-query --generate-schema \
--odb-epilogue '#include <libbrep/wrapper-traits.hxx>' \
--generate-prepared -DLIBODB_BUILD2 -DLIBODB_PGSQL_BUILD2 \
--include-with-brackets --include-prefix libbrep \
- --guard-prefix LIBBREP \
+ --guard-prefix LIBBREP \
build.hxx
$odb "${inc[@]}" -d pgsql --std c++14 --generate-query \
diff --git a/libbrep/package-extra.sql b/libbrep/package-extra.sql
index fe936ff..5c04147 100644
--- a/libbrep/package-extra.sql
+++ b/libbrep/package-extra.sql
@@ -38,16 +38,17 @@ DROP TYPE IF EXISTS weighted_text CASCADE;
CREATE TYPE weighted_text AS (a TEXT, b TEXT, c TEXT, d TEXT);
-- Return the latest versions of matching a tenant internal packages as a set
--- of package rows. If tenant is NULL, then match all tenants.
+-- of package rows. If tenant is NULL, then match all public tenants.
--
CREATE FUNCTION
latest_packages(IN tenant TEXT)
RETURNS SETOF package AS $$
SELECT p1.*
- FROM package p1 LEFT JOIN package p2 ON (
+ FROM package p1
+ LEFT JOIN package p2 ON (
p1.internal_repository_canonical_name IS NOT NULL AND
- p1.tenant = p2.tenant AND
- p1.name = p2.name AND
+ p1.tenant = p2.tenant AND
+ p1.name = p2.name AND
p2.internal_repository_canonical_name IS NOT NULL AND
(p1.version_epoch < p2.version_epoch OR
p1.version_epoch = p2.version_epoch AND
@@ -56,8 +57,12 @@ RETURNS SETOF package AS $$
(p1.version_canonical_release < p2.version_canonical_release OR
p1.version_canonical_release = p2.version_canonical_release AND
p1.version_revision < p2.version_revision))))
+ JOIN tenant t ON (p1.tenant = t.id)
WHERE
- (latest_packages.tenant IS NULL OR p1.tenant = latest_packages.tenant) AND
+ CASE
+ WHEN latest_packages.tenant IS NULL THEN NOT t.private
+ ELSE p1.tenant = latest_packages.tenant
+ END AND
p1.internal_repository_canonical_name IS NOT NULL AND
p2.name IS NULL;
$$ LANGUAGE SQL STABLE;
@@ -83,7 +88,8 @@ $$ LANGUAGE SQL STABLE;
-- Search for the latest version of an internal packages matching the
-- specified search query and tenant. Return a set of rows containing the
-- package id and search rank. If query is NULL, then match all packages and
--- return 0 rank for all rows. If tenant is NULL, then match all tenants.
+-- return 0 rank for all rows. If tenant is NULL, then match all public
+-- tenants.
--
CREATE FUNCTION
search_latest_packages(IN query tsquery,
@@ -107,9 +113,9 @@ RETURNS SETOF record AS $$
$$ LANGUAGE SQL STABLE;
-- Search for packages matching the search query and tenant and having the
--- specified name. Return a set of rows containing the package id and search
+-- specified name. Return a set of rows containing the package id and search
-- rank. If query is NULL, then match all packages and return 0 rank for all
--- rows. If tenant is NULL, then match all tenants.
+-- rows. If tenant is NULL, then match all public tenants.
--
CREATE FUNCTION
search_packages(IN query tsquery,
@@ -121,19 +127,22 @@ search_packages(IN query tsquery,
OUT version_revision INTEGER,
OUT rank real)
RETURNS SETOF record AS $$
- SELECT tenant, name, version_epoch, version_canonical_upstream,
- version_canonical_release, version_revision,
+ SELECT p.tenant, p.name, p.version_epoch, p.version_canonical_upstream,
+ p.version_canonical_release, p.version_revision,
CASE
WHEN query IS NULL THEN 0
-- Weight mapping: D C B A
ELSE ts_rank_cd('{0.05, 0.2, 0.9, 1.0}', search_index, query)
END AS rank
- FROM package
+ FROM package p JOIN tenant t ON (p.tenant = t.id)
WHERE
- (search_packages.tenant IS NULL OR tenant = search_packages.tenant) AND
- name = search_packages.name AND
- internal_repository_canonical_name IS NOT NULL AND
- (query IS NULL OR search_index @@ query);
+ CASE
+ WHEN search_packages.tenant IS NULL THEN NOT t.private
+ ELSE p.tenant = search_packages.tenant
+ END AND
+ name = search_packages.name AND
+ internal_repository_canonical_name IS NOT NULL AND
+ (query IS NULL OR search_index @@ query);
$$ LANGUAGE SQL STABLE;
-- Parse weighted_text to tsvector.
diff --git a/libbrep/package-traits.cxx b/libbrep/package-traits.cxx
index 1b90483..d6d3525 100644
--- a/libbrep/package-traits.cxx
+++ b/libbrep/package-traits.cxx
@@ -1,5 +1,4 @@
// file : libbrep/package-traits.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <libbrep/package-traits.hxx>
diff --git a/libbrep/package-traits.hxx b/libbrep/package-traits.hxx
index f30459a..f3efa20 100644
--- a/libbrep/package-traits.hxx
+++ b/libbrep/package-traits.hxx
@@ -1,5 +1,4 @@
// file : libbrep/package-traits.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_PACKAGE_TRAITS
diff --git a/libbrep/package.cxx b/libbrep/package.cxx
index e6c543d..4eb6fe8 100644
--- a/libbrep/package.cxx
+++ b/libbrep/package.cxx
@@ -1,5 +1,4 @@
// file : libbrep/package.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <libbrep/package.hxx>
@@ -41,9 +40,15 @@ namespace brep
// tenant
//
tenant::
- tenant (string i)
+ tenant (string i,
+ bool p,
+ optional<string> r,
+ optional<tenant_service> s)
: id (move (i)),
- creation_timestamp (timestamp::clock::now ())
+ private_ (p),
+ interactive (move (r)),
+ creation_timestamp (timestamp::clock::now ()),
+ service (move (s))
{
}
@@ -59,13 +64,13 @@ namespace brep
license_alternatives_type la,
small_vector<string, 5> tp,
small_vector<string, 5> kw,
- optional<string> ds,
- optional<text_type> dt,
- string ch,
- optional<url_type> ur,
- optional<url_type> du,
- optional<url_type> su,
- optional<url_type> pu,
+ optional<typed_text> ds,
+ optional<typed_text> pds,
+ optional<typed_text> ch,
+ optional<manifest_url> ur,
+ optional<manifest_url> du,
+ optional<manifest_url> su,
+ optional<manifest_url> pu,
optional<email_type> em,
optional<email_type> pe,
optional<email_type> be,
@@ -73,11 +78,12 @@ namespace brep
optional<email_type> bee,
dependencies_type dp,
requirements_type rq,
- small_vector<dependency, 1> ts,
- small_vector<dependency, 1> es,
- small_vector<dependency, 1> bms,
+ small_vector<test_dependency, 1> ts,
build_class_exprs bs,
build_constraints_type bc,
+ build_auxiliaries_type ac,
+ package_build_bot_keys bk,
+ package_build_configs bcs,
optional<path> lc,
optional<string> fr,
optional<string> sh,
@@ -94,7 +100,7 @@ namespace brep
topics (move (tp)),
keywords (move (kw)),
description (move (ds)),
- description_type (move (dt)),
+ package_description (move (pds)),
changes (move (ch)),
url (move (ur)),
doc_url (move (du)),
@@ -108,29 +114,82 @@ namespace brep
dependencies (move (dp)),
requirements (move (rq)),
tests (move (ts)),
- examples (move (es)),
- benchmarks (move (bms)),
builds (move (bs)),
- build_constraints (!stub () ? move (bc) : build_constraints_type ()),
+ build_constraints (move (bc)),
+ build_auxiliaries (move (ac)),
+ build_bot_keys (move (bk)),
+ build_configs (move (bcs)),
internal_repository (move (rp)),
location (move (lc)),
fragment (move (fr)),
- sha256sum (move (sh)),
- buildable (!stub () && internal_repository->buildable)
+ sha256sum (move (sh))
{
+ // The default configuration is always added by the package manifest
+ // parser (see libbpkg/manifest.cxx for details).
+ //
+ assert (find ("default", build_configs) != nullptr);
+
+ if (stub ())
+ unbuildable_reason = brep::unbuildable_reason::stub;
+ else if (!internal_repository->buildable)
+ unbuildable_reason = brep::unbuildable_reason::unbuildable;
+
+ buildable = !unbuildable_reason;
+
+ // If the package is buildable deduce the custom_bot flag.
+ //
+ if (buildable)
+ {
+ for (const package_build_config& bc: build_configs)
+ {
+ bool custom (!bc.effective_bot_keys (build_bot_keys).empty ());
+
+ if (!custom_bot)
+ {
+ custom_bot = custom;
+ }
+ //
+ // If both the custom and default bots are used by the package, then
+ // reset the custom_bot flag to nullopt and bail out from the build
+ // package configurations loop.
+ //
+ else if (*custom_bot != custom)
+ {
+ custom_bot = nullopt;
+ break;
+ }
+ }
+ }
+
assert (internal_repository->internal);
}
package::
package (package_name nm,
version_type vr,
+ build_class_exprs bs,
+ build_constraints_type bc,
+ build_auxiliaries_type ac,
+ package_build_configs bcs,
shared_ptr<repository_type> rp)
: id (rp->tenant, move (nm), vr),
tenant (id.tenant),
name (id.name),
version (move (vr)),
- buildable (false)
+ builds (move (bs)),
+ build_constraints (move (bc)),
+ build_auxiliaries (move (ac)),
+ build_configs (move (bcs)),
+ buildable (false),
+ unbuildable_reason (stub ()
+ ? brep::unbuildable_reason::stub
+ : brep::unbuildable_reason::external)
{
+ // The default configuration is always added by the package manifest
+ // parser (see libbpkg/manifest.cxx for details).
+ //
+ assert (find ("default", build_configs) != nullptr);
+
assert (!rp->internal);
other_repositories.emplace_back (move (rp));
}
@@ -151,11 +210,11 @@ namespace brep
// Probably drop-box would be better as also tells what are
// the available internal repositories.
//
- string k (project.string () + " " + name.string () + " " +
- version.string () + " " + version.string (true));
+ string k (project.string () + ' ' + name.string () + ' ' +
+ version.string () + ' ' + version.string (true));
if (upstream_version)
- k += " " + *upstream_version;
+ k += ' ' + *upstream_version;
// Add licenses to search keywords.
//
@@ -163,13 +222,13 @@ namespace brep
{
for (const auto& l: la)
{
- k += " " + l;
+ k += ' ' + l;
// If license is say LGPLv2 then LGPL is also a search keyword.
//
size_t n (l.size ());
if (n > 2 && l[n - 2] == 'v' && l[n - 1] >= '0' && l[n - 1] <= '9')
- k += " " + string (l, 0, n - 2);
+ k += ' ' + string (l, 0, n - 2);
}
}
@@ -180,14 +239,24 @@ namespace brep
// Add topics to the second-strongest search keywords.
//
for (const auto& t: topics)
- k2 += " " + t;
+ k2 += ' ' + t;
// Add keywords to the second-strongest search keywords.
//
for (const auto& k: keywords)
- k2 += " " + k;
+ k2 += ' ' + k;
+
+ string d (description ? description->text : "");
+
+ if (package_description)
+ {
+ if (description)
+ d += ' ';
+
+ d += package_description->text;
+ }
- return {move (k), move (k2), description ? *description : "", changes};
+ return {move (k), move (k2), move (d), changes ? changes->text : ""};
}
// repository
diff --git a/libbrep/package.hxx b/libbrep/package.hxx
index 47f0ebe..45008d4 100644
--- a/libbrep/package.hxx
+++ b/libbrep/package.hxx
@@ -1,5 +1,4 @@
// file : libbrep/package.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_PACKAGE_HXX
@@ -19,9 +18,9 @@
// Used by the data migration entries.
//
-#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 17
+#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 27
-#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 17, closed)
+#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 33, closed)
namespace brep
{
@@ -50,9 +49,12 @@ namespace brep
using bpkg::text_type;
using bpkg::to_text_type;
+ // Note that here we assume that the saved string representation of a type
+ // is always recognized later.
+ //
#pragma db map type(text_type) as(string) \
to(to_string (?)) \
- from(brep::to_text_type (?))
+ from(*brep::to_text_type (?))
using optional_text_type = optional<text_type>;
@@ -60,36 +62,25 @@ namespace brep
to((?) ? to_string (*(?)) : brep::optional_string ()) \
from((?) ? brep::to_text_type (*(?)) : brep::optional_text_type ())
- // url
+ // manifest_url
//
- using bpkg::url;
+ using bpkg::manifest_url;
- #pragma db value(url) definition
- #pragma db member(url::value) virtual(string) before \
- get(this.string ()) \
- set(this = brep::url ((?), "" /* comment */)) \
+ #pragma db value(manifest_url) definition
+ #pragma db member(manifest_url::value) virtual(string) before \
+ get(this.string ()) \
+ set(this = brep::manifest_url ((?), "" /* comment */)) \
column("")
- // email
- //
- using bpkg::email;
-
- #pragma db value(email) definition
- #pragma db member(email::value) virtual(string) before access(this) column("")
-
// licenses
//
using bpkg::licenses;
- using license_alternatives = vector<licenses>;
+ using license_alternatives = small_vector<licenses, 1>;
#pragma db value(licenses) definition
// dependencies
//
- using bpkg::version_constraint;
-
- #pragma db value(version_constraint) definition
-
// Notes:
//
// 1. Will the package be always resolvable? What if it is in
@@ -141,8 +132,8 @@ namespace brep
package_name name;
optional<version_constraint> constraint;
- // Resolved dependency package. NULL if the repository load was shallow
- // and so the package dependencies are not resolved.
+ // Resolved dependency package. Can be NULL if the repository load was
+ // shallow and the package dependency could not be resolved.
//
lazy_shared_ptr<package_type> package;
@@ -161,26 +152,74 @@ namespace brep
operator!= (const dependency&, const dependency&);
#pragma db value
- class dependency_alternatives: public vector<dependency>
+ class dependency_alternative: public small_vector<dependency, 1>
+ {
+ public:
+ // While we currently don't use the reflect, prefer, accept, and require
+ // values, let's save them for completeness.
+ //
+ optional<string> enable;
+ optional<string> reflect;
+ optional<string> prefer;
+ optional<string> accept;
+ optional<string> require;
+
+ dependency_alternative () = default;
+ dependency_alternative (optional<string> e,
+ optional<string> r,
+ optional<string> p,
+ optional<string> a,
+ optional<string> q)
+ : enable (move (e)),
+ reflect (move (r)),
+ prefer (move (p)),
+ accept (move (a)),
+ require (move (q)) {}
+ };
+
+ #pragma db value
+ class dependency_alternatives: public small_vector<dependency_alternative, 1>
{
public:
- bool conditional;
bool buildtime;
string comment;
dependency_alternatives () = default;
- dependency_alternatives (bool d, bool b, string c)
- : conditional (d), buildtime (b), comment (move (c)) {}
+ dependency_alternatives (bool b, string c)
+ : buildtime (b), comment (move (c)) {}
};
using dependencies = vector<dependency_alternatives>;
- // requirements
+ // tests
//
- using bpkg::requirement_alternatives;
- using requirements = vector<requirement_alternatives>;
+ #pragma db value
+ struct test_dependency: dependency
+ {
+ test_dependency_type type;
+ bool buildtime;
+ optional<string> enable;
+ optional<string> reflect;
+
+ test_dependency () = default;
+ test_dependency (package_name n,
+ test_dependency_type t,
+ bool b,
+ optional<version_constraint> c,
+ optional<string> e,
+ optional<string> r)
+ : dependency {move (n), move (c), nullptr /* package */},
+ type (t),
+ buildtime (b),
+ enable (move (e)),
+ reflect (move (r))
+ {
+ }
- #pragma db value(requirement_alternatives) definition
+ // Database mapping.
+ //
+ #pragma db member(buildtime)
+ };
// certificate
//
@@ -202,17 +241,82 @@ namespace brep
// Create the tenant object with the timestamp set to now and the archived
// flag set to false.
//
- explicit
- tenant (string id);
+ tenant (string id,
+ bool private_,
+ optional<string> interactive,
+ optional<tenant_service>);
string id;
+ // If this flag is true, then display the packages in the web interface
+ // only in the tenant view mode.
+ //
+ bool private_; // Note: foreign-mapped in build.
+
+ // Interactive package build breakpoint.
+ //
+ // If present, then packages from this tenant will only be built
+ // interactively and only non-interactively otherwise.
+ //
+ optional<string> interactive; // Note: foreign-mapped in build.
+
timestamp creation_timestamp;
- bool archived = false; // Note: foreign-mapped in build.
+ bool archived = false; // Note: foreign-mapped in build.
+
+ optional<tenant_service> service; // Note: foreign-mapped in build.
+
+ // Note that due to the implementation complexity and performance
+ // considerations, the service notifications are not synchronized. This
+ // leads to a potential race, so that before we have sent the `queued`
+ // notification for a package build, some other thread (potentially in a
+ // different process) could have already sent the `building` notification
+ // for it. It feels like there is no easy way to reliably fix that.
+ // Instead, we just decrease the probability of such a notifications
+ // sequence failure by delaying builds of the freshly queued packages for
+ // some time. Specifically, whenever the `queued` notification is ought
+ // to be sent (normally out of the database transaction, since it likely
+ // sends an HTTP request, etc) the tenant's queued_timestamp member is set
+ // to the current time. During the configured time interval since that
+ // time point the build tasks may not be issued for the tenant's packages.
+ //
+ // Also note that while there are similar potential races for other
+ // notification sequences, their probability is rather low due to the
+ // natural reasons (non-zero build task execution time, etc) and thus we
+ // just ignore them.
+ //
+ optional<timestamp> queued_timestamp; // Note: foreign-mapped in build.
+
+ // Note that after the package tenant is created but before the first
+ // build object is created, there is no easy way to produce a list of
+ // unbuilt package configurations. That would require to know the build
+ // toolchain(s), which are normally extracted from the build objects.
+ // Thus, the empty unbuilt package configurations list is ambiguous and
+ // can either mean that no more package configurations can be built or
+ // that we have not enough information to produce the list. To
+ // disambiguate the empty list in the interface, in the latter case we
+ // want to display the question mark instead of 0 as an unbuilt package
+ // configurations count. To achieve this we will stash the build toolchain
+ // in the tenant when a package from this tenant is considered for a build
+ // for the first time but no configuration is picked for the build (the
+ // target configurations are excluded, an auxiliary machine is not
+ // available, etc). We will also use the stashed toolchain as a fallback
+ // until we are able to retrieve the toolchain(s) from the tenant builds
+ // to produce the unbuilt package configurations list.
+ //
+ // Note: foreign-mapped in build.
+ //
+ optional<brep::build_toolchain> build_toolchain;
// Database mapping.
//
#pragma db member(id) id
+ #pragma db member(private_)
+
+ #pragma db index("tenant_service_i") \
+ unique \
+ members(service.id, service.type)
+
+ #pragma db index member(service.id)
private:
friend class odb::access;
@@ -343,6 +447,67 @@ namespace brep
string d;
};
+ #pragma db value
+ struct typed_text
+ {
+ string text;
+ text_type type;
+
+ #pragma db member(text) column("")
+ };
+
+ // Tweak public_key_id mapping to include a constraint (this only affects the
+ // database schema).
+ //
+ #pragma db member(public_key_id::tenant) points_to(tenant)
+
+ #pragma db object pointer(shared_ptr) session
+ class public_key: public string
+ {
+ public:
+ public_key (string tenant, string fingerprint, string key)
+ : string (move (key)), id (move (tenant), move (fingerprint)) {}
+
+ public_key_id id;
+
+ // Database mapping.
+ //
+ #pragma db member(id) id column("")
+
+ #pragma db member(data) virtual(string) access(this)
+
+ private:
+ friend class odb::access;
+ public_key () = default;
+ };
+
+ // package_build_config
+ //
+ using package_build_config =
+ build_package_config_template<lazy_shared_ptr<public_key>>;
+
+ using package_build_configs =
+ build_package_configs_template<lazy_shared_ptr<public_key>>;
+
+ #pragma db value(package_build_config) definition
+
+ #pragma db member(package_build_config::builds) transient
+ #pragma db member(package_build_config::constraints) transient
+ #pragma db member(package_build_config::auxiliaries) transient
+ #pragma db member(package_build_config::bot_keys) transient
+
+ // package_build_bot_keys
+ //
+ using package_build_bot_keys = vector<lazy_shared_ptr<public_key>>;
+ using package_build_bot_key_key = odb::nested_key<package_build_bot_keys>;
+
+ using package_build_bot_keys_map = std::map<package_build_bot_key_key,
+ lazy_shared_ptr<public_key>>;
+
+ #pragma db value(package_build_bot_key_key)
+ #pragma db member(package_build_bot_key_key::outer) column("config_index")
+ #pragma db member(package_build_bot_key_key::inner) column("index")
+
// Tweak package_id mapping to include a constraint (this only affects the
// database schema).
//
@@ -357,14 +522,15 @@ namespace brep
using upstream_version_type = brep::upstream_version;
using priority_type = brep::priority;
using license_alternatives_type = brep::license_alternatives;
- using url_type = brep::url;
using email_type = brep::email;
using dependencies_type = brep::dependencies;
using requirements_type = brep::requirements;
using build_constraints_type = brep::build_constraints;
+ using build_auxiliaries_type = brep::build_auxiliaries;
- // Create internal package object. Note that for stubs the build
- // constraints are meaningless, and so not saved.
+ // Create internal package object.
+ //
+ // Note: the default build package config is expected to always be present.
//
package (package_name,
version_type,
@@ -375,13 +541,13 @@ namespace brep
license_alternatives_type,
small_vector<string, 5> topics,
small_vector<string, 5> keywords,
- optional<string> description,
- optional<text_type> description_type,
- string changes,
- optional<url_type>,
- optional<url_type> doc_url,
- optional<url_type> src_url,
- optional<url_type> package_url,
+ optional<typed_text> description,
+ optional<typed_text> package_description,
+ optional<typed_text> changes,
+ optional<manifest_url> url,
+ optional<manifest_url> doc_url,
+ optional<manifest_url> src_url,
+ optional<manifest_url> package_url,
optional<email_type>,
optional<email_type> package_email,
optional<email_type> build_email,
@@ -389,11 +555,12 @@ namespace brep
optional<email_type> build_error_email,
dependencies_type,
requirements_type,
- small_vector<dependency, 1> tests,
- small_vector<dependency, 1> examples,
- small_vector<dependency, 1> benchmarks,
+ small_vector<test_dependency, 1> tests,
build_class_exprs,
build_constraints_type,
+ build_auxiliaries_type,
+ package_build_bot_keys,
+ package_build_configs,
optional<path> location,
optional<string> fragment,
optional<string> sha256sum,
@@ -401,12 +568,28 @@ namespace brep
// Create external package object.
//
- // External repository packages can appear on the WEB interface only in
- // dependency list in the form of a link to the corresponding WEB page.
- // The only package information required to compose such a link is the
- // package name, version, and repository location.
+ // External package can appear on the WEB interface only in dependency
+ // list in the form of a link to the corresponding WEB page. The only
+ // package information required to compose such a link is the package name,
+ // version, and repository location.
+ //
+ // External package can also be a separate test for some primary package
+ // (and belong to a complement but yet external repository), and so we may
+ // need its build class expressions, constraints, and configurations to
 + // decide whether to build it together with the primary package or not (see
+ // test-exclude task manifest value for details). Additionally, when the
+ // test package is being built the auxiliary machines may also be
+ // required.
//
- package (package_name name, version_type, shared_ptr<repository_type>);
+ // Note: the default build package config is expected to always be present.
+ //
+ package (package_name name,
+ version_type,
+ build_class_exprs,
+ build_constraints_type,
+ build_auxiliaries_type,
+ package_build_configs,
+ shared_ptr<repository_type>);
bool
internal () const noexcept {return internal_repository != nullptr;}
@@ -431,33 +614,53 @@ namespace brep
// Matches the package name if the project name is not specified in
// the manifest.
//
- package_name project;
+ package_name project; // Note: foreign-mapped in build.
priority_type priority;
string summary;
license_alternatives_type license_alternatives;
small_vector<string, 5> topics;
small_vector<string, 5> keywords;
- optional<string> description; // Absent if type is unknown.
- optional<text_type> description_type; // Present if description is present.
- string changes;
- optional<url_type> url;
- optional<url_type> doc_url;
- optional<url_type> src_url;
- optional<url_type> package_url;
+
+ // Note that the descriptions and changes are absent if the respective
+ // type is unknown.
+ //
+ optional<typed_text> description;
+ optional<typed_text> package_description;
+ optional<typed_text> changes;
+
+ optional<manifest_url> url;
+ optional<manifest_url> doc_url;
+ optional<manifest_url> src_url;
+ optional<manifest_url> package_url;
optional<email_type> email;
optional<email_type> package_email;
- optional<email_type> build_email;
- optional<email_type> build_warning_email;
- optional<email_type> build_error_email;
+ optional<email_type> build_email; // Note: foreign-mapped in build.
+ optional<email_type> build_warning_email; // Note: foreign-mapped in build.
+ optional<email_type> build_error_email; // Note: foreign-mapped in build.
dependencies_type dependencies;
- requirements_type requirements;
- small_vector<dependency, 1> tests;
- small_vector<dependency, 1> examples;
- small_vector<dependency, 1> benchmarks;
+ requirements_type requirements; // Note: foreign-mapped in build.
+ small_vector<test_dependency, 1> tests; // Note: foreign-mapped in build.
+ // Common build classes, constraints, auxiliaries, and bot keys that apply
+ // to all configurations unless overridden.
+ //
build_class_exprs builds; // Note: foreign-mapped in build.
build_constraints_type build_constraints; // Note: foreign-mapped in build.
+ build_auxiliaries_type build_auxiliaries; // Note: foreign-mapped in build.
+ package_build_bot_keys build_bot_keys; // Note: foreign-mapped in build.
+ package_build_configs build_configs; // Note: foreign-mapped in build.
+
+ // Group the build_configs, builds, and build_constraints members of this
+ // object together with their respective nested configs entries into the
+ // separate section for an explicit load.
+ //
+ // Note that while the build auxiliaries and bot keys are persisted via
+ // the newly created package objects, they are only used via the
+ // foreign-mapped build_package objects (see build-package.hxx for
+ // details). Thus, we add them to the never-loaded unused_section (see
+ // below).
+ //
odb::section build_section;
// Note that it is foreign-mapped in build.
@@ -479,16 +682,26 @@ namespace brep
vector<lazy_shared_ptr<repository_type>> other_repositories;
- // Whether the package is buildable by the build bot controller service.
- // Can only be true for non-stubs that belong to at least one buildable
- // (internal) repository.
+ // Whether the package is buildable by the build bot controller service
+ // and the reason if it's not.
//
// While we could potentially calculate this flag on the fly, that would
// complicate the database queries significantly.
//
- // Note: foreign-mapped in build.
+ bool buildable; // Note: foreign-mapped in build.
+ optional<brep::unbuildable_reason> unbuildable_reason;
+
+ // If this flag is true, then all the package configurations are buildable
+ // with the custom build bots. If false, then all configurations are
+ // buildable with the default bots. If nullopt, then some configurations
+ // are buildable with the custom and some with the default build bots.
//
- bool buildable;
+ // Note: meaningless if buildable is false.
+ //
+ optional<bool> custom_bot; // Note: foreign-mapped in build.
+
+ private:
+ odb::section unused_section;
// Database mapping.
//
@@ -524,48 +737,80 @@ namespace brep
// dependencies
//
- using _dependency_key = odb::nested_key<dependency_alternatives>;
+ // Note that this is a 2-level nested container which is mapped to three
+ // container tables each containing data of each dimension.
+
+ // Container of the dependency_alternatives values.
+ //
+ #pragma db member(dependencies) id_column("") value_column("")
+
+ // Container of the dependency_alternative values.
+ //
+ using _dependency_alternative_key =
+ odb::nested_key<dependency_alternatives>;
+
using _dependency_alternatives_type =
- std::map<_dependency_key, dependency>;
+ std::map<_dependency_alternative_key, dependency_alternative>;
- #pragma db value(_dependency_key)
- #pragma db member(_dependency_key::outer) column("dependency_index")
- #pragma db member(_dependency_key::inner) column("index")
+ #pragma db value(_dependency_alternative_key)
+ #pragma db member(_dependency_alternative_key::outer) column("dependency_index")
+ #pragma db member(_dependency_alternative_key::inner) column("index")
- #pragma db member(dependencies) id_column("") value_column("")
#pragma db member(dependency_alternatives) \
virtual(_dependency_alternatives_type) \
after(dependencies) \
get(odb::nested_get (this.dependencies)) \
set(odb::nested_set (this.dependencies, std::move (?))) \
+ id_column("") key_column("") value_column("")
+
+ // Container of the dependency values.
+ //
+ using _dependency_key = odb::nested2_key<dependency_alternatives>;
+ using _dependency_alternative_dependencies_type =
+ std::map<_dependency_key, dependency>;
+
+ #pragma db value(_dependency_key)
+ #pragma db member(_dependency_key::outer) column("dependency_index")
+ #pragma db member(_dependency_key::middle) column("alternative_index")
+ #pragma db member(_dependency_key::inner) column("index")
+
+ #pragma db member(dependency_alternative_dependencies) \
+ virtual(_dependency_alternative_dependencies_type) \
+ after(dependency_alternatives) \
+ get(odb::nested2_get (this.dependencies)) \
+ set(odb::nested2_set (this.dependencies, std::move (?))) \
id_column("") key_column("") value_column("dep_")
// requirements
//
- using _requirement_key = odb::nested_key<requirement_alternatives>;
- using _requirement_alternatives_type =
- std::map<_requirement_key, string>;
-
- #pragma db value(_requirement_key)
- #pragma db member(_requirement_key::outer) column("requirement_index")
- #pragma db member(_requirement_key::inner) column("index")
+ // Note that this is a 2-level nested container which is mapped to three
+ // container tables each containing data of each dimension.
+ // Container of the requirement_alternatives values.
+ //
#pragma db member(requirements) id_column("") value_column("")
+
+ // Container of the requirement_alternative values.
+ //
#pragma db member(requirement_alternatives) \
- virtual(_requirement_alternatives_type) \
+ virtual(requirement_alternatives_map) \
after(requirements) \
get(odb::nested_get (this.requirements)) \
set(odb::nested_set (this.requirements, std::move (?))) \
- id_column("") key_column("") value_column("id")
+ id_column("") key_column("") value_column("")
- // tests, examples, benchmarks
+ // Container of the requirement (string) values.
//
- // Seeing that these reuse the dependency types, we are also going to
- // have identical database mapping.
+ #pragma db member(requirement_alternative_requirements) \
+ virtual(requirement_alternative_requirements_map) \
+ after(requirement_alternatives) \
+ get(odb::nested2_get (this.requirements)) \
+ set(odb::nested2_set (this.requirements, std::move (?))) \
+ id_column("") key_column("") value_column("id")
+
+ // tests
//
- #pragma db member(tests) id_column("") value_column("dep_")
- #pragma db member(examples) id_column("") value_column("dep_")
- #pragma db member(benchmarks) id_column("") value_column("dep_")
+ #pragma db member(tests) id_column("") value_column("test_")
// builds
//
@@ -577,7 +822,74 @@ namespace brep
#pragma db member(build_constraints) id_column("") value_column("") \
section(build_section)
- #pragma db member(build_section) load(lazy) update(always)
+ // build_auxiliaries
+ //
+ #pragma db member(build_auxiliaries) id_column("") value_column("") \
+ section(unused_section)
+
+ // build_bot_keys
+ //
+ #pragma db member(build_bot_keys) \
+ id_column("") value_column("key_") value_not_null \
+ section(unused_section)
+
+ // build_configs
+ //
+ // Note that package_build_config::{builds,constraints,auxiliaries,
+ // bot_keys} are persisted/loaded via the separate nested containers (see
+ // commons.hxx for details).
+ //
+ #pragma db member(build_configs) id_column("") value_column("config_") \
+ section(build_section)
+
+ #pragma db member(build_config_builds) \
+ virtual(build_class_exprs_map) \
+ after(build_configs) \
+ get(odb::nested_get ( \
+ brep::build_package_config_builds (this.build_configs))) \
+ set(brep::build_package_config_builds bs; \
+ odb::nested_set (bs, std::move (?)); \
+ move (bs).to_configs (this.build_configs)) \
+ id_column("") key_column("") value_column("") \
+ section(build_section)
+
+ #pragma db member(build_config_constraints) \
+ virtual(build_constraints_map) \
+ after(build_config_builds) \
+ get(odb::nested_get ( \
+ brep::build_package_config_constraints (this.build_configs))) \
+ set(brep::build_package_config_constraints cs; \
+ odb::nested_set (cs, std::move (?)); \
+ move (cs).to_configs (this.build_configs)) \
+ id_column("") key_column("") value_column("") \
+ section(build_section)
+
+ #pragma db member(build_config_auxiliaries) \
+ virtual(build_auxiliaries_map) \
+ after(build_config_constraints) \
+ get(odb::nested_get ( \
+ brep::build_package_config_auxiliaries (this.build_configs))) \
+ set(brep::build_package_config_auxiliaries as; \
+ odb::nested_set (as, std::move (?)); \
+ move (as).to_configs (this.build_configs)) \
+ id_column("") key_column("") value_column("") \
+ section(unused_section)
+
+ #pragma db member(build_config_bot_keys) \
+ virtual(package_build_bot_keys_map) \
+ after(build_config_auxiliaries) \
+ get(odb::nested_get ( \
+ brep::build_package_config_bot_keys< \
+ lazy_shared_ptr<brep::public_key>> (this.build_configs))) \
+ set(brep::build_package_config_bot_keys< \
+ lazy_shared_ptr<brep::public_key>> bks; \
+ odb::nested_set (bks, std::move (?)); \
+ move (bks).to_configs (this.build_configs)) \
+ id_column("") key_column("") value_column("key_") value_not_null \
+ section(unused_section)
+
+ #pragma db member(build_section) load(lazy) update(always)
+ #pragma db member(unused_section) load(lazy) update(manual)
// other_repositories
//
@@ -595,9 +907,9 @@ namespace brep
friend class odb::access;
package (): tenant (id.tenant), name (id.name) {}
- // Save keywords, summary, description, and changes to weighted_text
- // a, b, c, d members, respectively. So a word found in keywords will
- // have a higher weight than if it's found in the summary.
+ // Save keywords, summary, descriptions, and changes to weighted_text a,
+ // b, c, d members, respectively. So a word found in keywords will have a
+ // higher weight than if it's found in the summary.
//
weighted_text
search_text () const;
diff --git a/libbrep/package.xml b/libbrep/package.xml
index 785ae0f..96e93a7 100644
--- a/libbrep/package.xml
+++ b/libbrep/package.xml
@@ -1,7 +1,280 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="package" version="1">
- <model version="17">
+ <changeset version="33">
+ <add-table name="public_key" kind="object">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="fingerprint" type="TEXT" null="false"/>
+ <column name="data" type="TEXT" null="false"/>
+ <primary-key>
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </primary-key>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ </add-table>
+ <alter-table name="package">
+ <add-column name="custom_bot" type="BOOLEAN" null="true"/>
+ </alter-table>
+ <add-table name="package_build_bot_keys" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="key_tenant" type="TEXT" null="false"/>
+ <column name="key_fingerprint" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_bot_keys_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <index name="package_build_bot_keys_index_i">
+ <column name="index"/>
+ </index>
+ <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="key_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <column name="key_fingerprint"/>
+ <references table="public_key">
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </references>
+ </foreign-key>
+ </add-table>
+ <add-table name="package_build_config_bot_keys" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="key_tenant" type="TEXT" null="false"/>
+ <column name="key_fingerprint" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_config_bot_keys_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="key_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <column name="key_fingerprint"/>
+ <references table="public_key">
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </references>
+ </foreign-key>
+ </add-table>
+ </changeset>
+
+ <changeset version="32">
+ <alter-table name="tenant">
+ <add-column name="build_toolchain_name" type="TEXT" null="true"/>
+ <add-column name="build_toolchain_version_epoch" type="INTEGER" null="true"/>
+ <add-column name="build_toolchain_version_canonical_upstream" type="TEXT" null="true"/>
+ <add-column name="build_toolchain_version_canonical_release" type="TEXT" null="true"/>
+ <add-column name="build_toolchain_version_revision" type="INTEGER" null="true"/>
+ <add-column name="build_toolchain_version_upstream" type="TEXT" null="true"/>
+ <add-column name="build_toolchain_version_release" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="31">
+ <add-table name="package_build_auxiliaries" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="environment_name" type="TEXT" null="false"/>
+ <column name="config" type="TEXT" null="false"/>
+ <column name="comment" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_auxiliaries_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <index name="package_build_auxiliaries_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ <add-table name="package_build_config_auxiliaries" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="environment_name" type="TEXT" null="false"/>
+ <column name="config" type="TEXT" null="false"/>
+ <column name="comment" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_config_auxiliaries_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="30">
+ <alter-table name="tenant">
+ <add-column name="service_id" type="TEXT" null="true"/>
+ <add-column name="service_type" type="TEXT" null="true"/>
+ <add-column name="service_data" type="TEXT" null="true"/>
+ <add-column name="queued_timestamp" type="BIGINT" null="true"/>
+ <add-index name="tenant_service_i" type="UNIQUE">
+ <column name="service_id"/>
+ <column name="service_type"/>
+ </add-index>
+ <add-index name="tenant_service_id_i">
+ <column name="service_id"/>
+ </add-index>
+ </alter-table>
+ </changeset>
+
+ <changeset version="29">
+ <alter-table name="package_tests">
+ <add-column name="test_enable" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="28">
+ <alter-table name="package_build_configs">
+ <add-column name="config_email" type="TEXT" null="true"/>
+ <add-column name="config_email_comment" type="TEXT" null="true"/>
+ <add-column name="config_warning_email" type="TEXT" null="true"/>
+ <add-column name="config_warning_email_comment" type="TEXT" null="true"/>
+ <add-column name="config_error_email" type="TEXT" null="true"/>
+ <add-column name="config_error_email_comment" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <model version="27">
<table name="tenant" kind="object">
<column name="id" type="TEXT" null="false"/>
+ <column name="private" type="BOOLEAN" null="false"/>
+ <column name="interactive" type="TEXT" null="true"/>
<column name="creation_timestamp" type="BIGINT" null="false"/>
<column name="archived" type="BOOLEAN" null="false"/>
<primary-key>
@@ -142,7 +415,10 @@
<column name="summary" type="TEXT" null="false"/>
<column name="description" type="TEXT" null="true"/>
<column name="description_type" type="TEXT" null="true"/>
- <column name="changes" type="TEXT" null="false"/>
+ <column name="package_description" type="TEXT" null="true"/>
+ <column name="package_description_type" type="TEXT" null="true"/>
+ <column name="changes" type="TEXT" null="true"/>
+ <column name="changes_type" type="TEXT" null="true"/>
<column name="url" type="TEXT" null="true"/>
<column name="url_comment" type="TEXT" null="true"/>
<column name="doc_url" type="TEXT" null="true"/>
@@ -167,6 +443,7 @@
<column name="fragment" type="TEXT" null="true"/>
<column name="sha256sum" type="TEXT" null="true"/>
<column name="buildable" type="BOOLEAN" null="false"/>
+ <column name="unbuildable_reason" type="TEXT" null="true"/>
<column name="search_index" type="tsvector" null="true"/>
<primary-key>
<column name="tenant"/>
@@ -378,7 +655,6 @@
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="conditional" type="BOOLEAN" null="false"/>
<column name="buildtime" type="BOOLEAN" null="false"/>
<column name="comment" type="TEXT" null="false"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
@@ -424,6 +700,52 @@
<column name="version_revision" type="INTEGER" null="false"/>
<column name="dependency_index" type="BIGINT" null="false"/>
<column name="index" type="BIGINT" null="false"/>
+ <column name="enable" type="TEXT" null="true"/>
+ <column name="reflect" type="TEXT" null="true"/>
+ <column name="prefer" type="TEXT" null="true"/>
+ <column name="accept" type="TEXT" null="true"/>
+ <column name="require" type="TEXT" null="true"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_dependency_alternatives_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ </table>
+ <table name="package_dependency_alternative_dependencies" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="dependency_index" type="BIGINT" null="false"/>
+ <column name="alternative_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
<column name="dep_name" type="CITEXT" null="false"/>
<column name="dep_min_version_epoch" type="INTEGER" null="true"/>
<column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/>
@@ -467,7 +789,7 @@
<column name="version_revision"/>
</references>
</foreign-key>
- <index name="package_dependency_alternatives_object_id_i">
+ <index name="package_dependency_alternative_dependencies_object_id_i">
<column name="tenant"/>
<column name="name"/>
<column name="version_epoch"/>
@@ -506,7 +828,6 @@
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="conditional" type="BOOLEAN" null="false"/>
<column name="buildtime" type="BOOLEAN" null="false"/>
<column name="comment" type="TEXT" null="false"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
@@ -552,7 +873,8 @@
<column name="version_revision" type="INTEGER" null="false"/>
<column name="requirement_index" type="BIGINT" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="id" type="TEXT" null="false"/>
+ <column name="enable" type="TEXT" null="true"/>
+ <column name="reflect" type="TEXT" null="true"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
<references table="tenant">
@@ -584,6 +906,48 @@
<column name="version_revision"/>
</index>
</table>
+ <table name="package_requirement_alternative_requirements" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="requirement_index" type="BIGINT" null="false"/>
+ <column name="alternative_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="id" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_requirement_alternative_requirements_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ </table>
<table name="package_tests" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
@@ -592,27 +956,30 @@
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="dep_name" type="CITEXT" null="false"/>
- <column name="dep_min_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_min_version_revision" type="INTEGER" null="true"/>
- <column name="dep_min_version_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_release" type="TEXT" null="true"/>
- <column name="dep_max_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_max_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_max_version_revision" type="INTEGER" null="true"/>
- <column name="dep_max_version_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_release" type="TEXT" null="true"/>
- <column name="dep_min_open" type="BOOLEAN" null="true"/>
- <column name="dep_max_open" type="BOOLEAN" null="true"/>
- <column name="dep_package_tenant" type="TEXT" null="true"/>
- <column name="dep_package_name" type="CITEXT" null="true"/>
- <column name="dep_package_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_package_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_package_version_canonical_release" type="TEXT" null="true" options="COLLATE &quot;C&quot;"/>
- <column name="dep_package_version_revision" type="INTEGER" null="true"/>
+ <column name="test_name" type="CITEXT" null="false"/>
+ <column name="test_min_version_epoch" type="INTEGER" null="true"/>
+ <column name="test_min_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="test_min_version_canonical_release" type="TEXT" null="true"/>
+ <column name="test_min_version_revision" type="INTEGER" null="true"/>
+ <column name="test_min_version_upstream" type="TEXT" null="true"/>
+ <column name="test_min_version_release" type="TEXT" null="true"/>
+ <column name="test_max_version_epoch" type="INTEGER" null="true"/>
+ <column name="test_max_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="test_max_version_canonical_release" type="TEXT" null="true"/>
+ <column name="test_max_version_revision" type="INTEGER" null="true"/>
+ <column name="test_max_version_upstream" type="TEXT" null="true"/>
+ <column name="test_max_version_release" type="TEXT" null="true"/>
+ <column name="test_min_open" type="BOOLEAN" null="true"/>
+ <column name="test_max_open" type="BOOLEAN" null="true"/>
+ <column name="test_package_tenant" type="TEXT" null="true"/>
+ <column name="test_package_name" type="CITEXT" null="true"/>
+ <column name="test_package_version_epoch" type="INTEGER" null="true"/>
+ <column name="test_package_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="test_package_version_canonical_release" type="TEXT" null="true" options="COLLATE &quot;C&quot;"/>
+ <column name="test_package_version_revision" type="INTEGER" null="true"/>
+ <column name="test_type" type="TEXT" null="false"/>
+ <column name="test_buildtime" type="BOOLEAN" null="false"/>
+ <column name="test_reflect" type="TEXT" null="true"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
<references table="tenant">
@@ -646,19 +1013,19 @@
<index name="package_tests_index_i">
<column name="index"/>
</index>
- <foreign-key name="dep_package_tenant_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
+ <foreign-key name="test_package_tenant_fk" deferrable="DEFERRED">
+ <column name="test_package_tenant"/>
<references table="tenant">
<column name="id"/>
</references>
</foreign-key>
- <foreign-key name="dep_package_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
- <column name="dep_package_name"/>
- <column name="dep_package_version_epoch"/>
- <column name="dep_package_version_canonical_upstream"/>
- <column name="dep_package_version_canonical_release"/>
- <column name="dep_package_version_revision"/>
+ <foreign-key name="test_package_fk" deferrable="DEFERRED">
+ <column name="test_package_tenant"/>
+ <column name="test_package_name"/>
+ <column name="test_package_version_epoch"/>
+ <column name="test_package_version_canonical_upstream"/>
+ <column name="test_package_version_canonical_release"/>
+ <column name="test_package_version_revision"/>
<references table="package">
<column name="tenant"/>
<column name="name"/>
@@ -669,7 +1036,7 @@
</references>
</foreign-key>
</table>
- <table name="package_examples" kind="container">
+ <table name="package_builds" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
<column name="version_epoch" type="INTEGER" null="false"/>
@@ -677,27 +1044,8 @@
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="dep_name" type="CITEXT" null="false"/>
- <column name="dep_min_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_min_version_revision" type="INTEGER" null="true"/>
- <column name="dep_min_version_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_release" type="TEXT" null="true"/>
- <column name="dep_max_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_max_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_max_version_revision" type="INTEGER" null="true"/>
- <column name="dep_max_version_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_release" type="TEXT" null="true"/>
- <column name="dep_min_open" type="BOOLEAN" null="true"/>
- <column name="dep_max_open" type="BOOLEAN" null="true"/>
- <column name="dep_package_tenant" type="TEXT" null="true"/>
- <column name="dep_package_name" type="CITEXT" null="true"/>
- <column name="dep_package_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_package_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_package_version_canonical_release" type="TEXT" null="true" options="COLLATE &quot;C&quot;"/>
- <column name="dep_package_version_revision" type="INTEGER" null="true"/>
+ <column name="expression" type="TEXT" null="false"/>
+ <column name="comment" type="TEXT" null="false"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
<references table="tenant">
@@ -720,7 +1068,7 @@
<column name="version_revision"/>
</references>
</foreign-key>
- <index name="package_examples_object_id_i">
+ <index name="package_builds_object_id_i">
<column name="tenant"/>
<column name="name"/>
<column name="version_epoch"/>
@@ -728,33 +1076,11 @@
<column name="version_canonical_release"/>
<column name="version_revision"/>
</index>
- <index name="package_examples_index_i">
+ <index name="package_builds_index_i">
<column name="index"/>
</index>
- <foreign-key name="dep_package_tenant_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="dep_package_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
- <column name="dep_package_name"/>
- <column name="dep_package_version_epoch"/>
- <column name="dep_package_version_canonical_upstream"/>
- <column name="dep_package_version_canonical_release"/>
- <column name="dep_package_version_revision"/>
- <references table="package">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </references>
- </foreign-key>
</table>
- <table name="package_benchmarks" kind="container">
+ <table name="package_build_constraints" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
<column name="version_epoch" type="INTEGER" null="false"/>
@@ -762,27 +1088,10 @@
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
<column name="index" type="BIGINT" null="false"/>
- <column name="dep_name" type="CITEXT" null="false"/>
- <column name="dep_min_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_min_version_revision" type="INTEGER" null="true"/>
- <column name="dep_min_version_upstream" type="TEXT" null="true"/>
- <column name="dep_min_version_release" type="TEXT" null="true"/>
- <column name="dep_max_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_max_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_canonical_release" type="TEXT" null="true"/>
- <column name="dep_max_version_revision" type="INTEGER" null="true"/>
- <column name="dep_max_version_upstream" type="TEXT" null="true"/>
- <column name="dep_max_version_release" type="TEXT" null="true"/>
- <column name="dep_min_open" type="BOOLEAN" null="true"/>
- <column name="dep_max_open" type="BOOLEAN" null="true"/>
- <column name="dep_package_tenant" type="TEXT" null="true"/>
- <column name="dep_package_name" type="CITEXT" null="true"/>
- <column name="dep_package_version_epoch" type="INTEGER" null="true"/>
- <column name="dep_package_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="dep_package_version_canonical_release" type="TEXT" null="true" options="COLLATE &quot;C&quot;"/>
- <column name="dep_package_version_revision" type="INTEGER" null="true"/>
+ <column name="exclusion" type="BOOLEAN" null="false"/>
+ <column name="config" type="TEXT" null="false"/>
+ <column name="target" type="TEXT" null="true"/>
+ <column name="comment" type="TEXT" null="false"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
<references table="tenant">
@@ -805,7 +1114,7 @@
<column name="version_revision"/>
</references>
</foreign-key>
- <index name="package_benchmarks_object_id_i">
+ <index name="package_build_constraints_object_id_i">
<column name="tenant"/>
<column name="name"/>
<column name="version_epoch"/>
@@ -813,22 +1122,34 @@
<column name="version_canonical_release"/>
<column name="version_revision"/>
</index>
- <index name="package_benchmarks_index_i">
+ <index name="package_build_constraints_index_i">
<column name="index"/>
</index>
- <foreign-key name="dep_package_tenant_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
+ </table>
+ <table name="package_build_configs" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="config_name" type="TEXT" null="false"/>
+ <column name="config_arguments" type="TEXT" null="false"/>
+ <column name="config_comment" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
<references table="tenant">
<column name="id"/>
</references>
</foreign-key>
- <foreign-key name="dep_package_fk" deferrable="DEFERRED">
- <column name="dep_package_tenant"/>
- <column name="dep_package_name"/>
- <column name="dep_package_version_epoch"/>
- <column name="dep_package_version_canonical_upstream"/>
- <column name="dep_package_version_canonical_release"/>
- <column name="dep_package_version_revision"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
<references table="package">
<column name="tenant"/>
<column name="name"/>
@@ -838,14 +1159,26 @@
<column name="version_revision"/>
</references>
</foreign-key>
+ <index name="package_build_configs_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <index name="package_build_configs_index_i">
+ <column name="index"/>
+ </index>
</table>
- <table name="package_builds" kind="container">
+ <table name="package_build_config_builds" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
<column name="version_epoch" type="INTEGER" null="false"/>
<column name="version_canonical_upstream" type="TEXT" null="false"/>
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
<column name="index" type="BIGINT" null="false"/>
<column name="expression" type="TEXT" null="false"/>
<column name="comment" type="TEXT" null="false"/>
@@ -871,7 +1204,7 @@
<column name="version_revision"/>
</references>
</foreign-key>
- <index name="package_builds_object_id_i">
+ <index name="package_build_config_builds_object_id_i">
<column name="tenant"/>
<column name="name"/>
<column name="version_epoch"/>
@@ -879,17 +1212,15 @@
<column name="version_canonical_release"/>
<column name="version_revision"/>
</index>
- <index name="package_builds_index_i">
- <column name="index"/>
- </index>
</table>
- <table name="package_build_constraints" kind="container">
+ <table name="package_build_config_constraints" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
<column name="version_epoch" type="INTEGER" null="false"/>
<column name="version_canonical_upstream" type="TEXT" null="false"/>
<column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
<column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
<column name="index" type="BIGINT" null="false"/>
<column name="exclusion" type="BOOLEAN" null="false"/>
<column name="config" type="TEXT" null="false"/>
@@ -917,7 +1248,7 @@
<column name="version_revision"/>
</references>
</foreign-key>
- <index name="package_build_constraints_object_id_i">
+ <index name="package_build_config_constraints_object_id_i">
<column name="tenant"/>
<column name="name"/>
<column name="version_epoch"/>
@@ -925,9 +1256,6 @@
<column name="version_canonical_release"/>
<column name="version_revision"/>
</index>
- <index name="package_build_constraints_index_i">
- <column name="index"/>
- </index>
</table>
<table name="package_other_repositories" kind="container">
<column name="tenant" type="TEXT" null="false"/>
diff --git a/libbrep/types.hxx b/libbrep/types.hxx
index 65c9791..3b5777d 100644
--- a/libbrep/types.hxx
+++ b/libbrep/types.hxx
@@ -1,5 +1,4 @@
// file : libbrep/types.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_TYPES_HXX
@@ -22,11 +21,12 @@
#include <odb/lazy-ptr.hxx>
-#include <libbutl/path.mxx>
-#include <libbutl/path-io.mxx>
-#include <libbutl/optional.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/small-vector.mxx>
+#include <libbutl/url.hxx>
+#include <libbutl/path.hxx>
+#include <libbutl/path-io.hxx>
+#include <libbutl/optional.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/small-vector.hxx>
namespace brep
{
@@ -50,7 +50,7 @@ namespace brep
using std::weak_ptr;
using std::vector;
- using butl::small_vector; // <libbutl/small-vector.mxx>
+ using butl::small_vector; // <libbutl/small-vector.hxx>
using strings = vector<string>;
using cstrings = vector<const char*>;
@@ -69,7 +69,7 @@ namespace brep
using std::generic_category;
- // <libbutl/optional.mxx>
+ // <libbutl/optional.hxx>
//
using butl::optional;
using butl::nullopt;
@@ -79,7 +79,7 @@ namespace brep
using odb::lazy_shared_ptr;
using odb::lazy_weak_ptr;
- // <libbutl/path.mxx>
+ // <libbutl/path.hxx>
//
using butl::path;
using butl::dir_path;
@@ -91,10 +91,15 @@ namespace brep
using butl::path_cast;
- // <libbutl/timestamp.mxx>
+ // <libbutl/url.hxx>
+ //
+ using butl::url;
+
+ // <libbutl/timestamp.hxx>
//
using butl::system_clock;
using butl::timestamp;
+ using butl::duration;
using butl::timestamp_nonexistent;
}
diff --git a/libbrep/utility.hxx b/libbrep/utility.hxx
index 96cf1d4..fce8fb5 100644
--- a/libbrep/utility.hxx
+++ b/libbrep/utility.hxx
@@ -1,17 +1,17 @@
// file : libbrep/utility.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_UTILITY_HXX
#define LIBBREP_UTILITY_HXX
-#include <memory> // make_shared()
-#include <string> // to_string()
-#include <utility> // move(), forward(), declval(), make_pair()
-#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
+#include <memory> // make_shared()
+#include <string> // to_string()
+#include <utility> // move(), forward(), declval(), make_pair()
+#include <cassert> // assert()
+#include <iterator> // make_move_iterator()
+#include <algorithm> // *
-#include <libbutl/utility.mxx> // icasecmp(), reverse_iterate(),
+#include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(),
// operator<<(ostream, exception)
namespace brep
@@ -25,8 +25,9 @@ namespace brep
using std::make_move_iterator;
using std::to_string;
- // <libbutl/utility.mxx>
+ // <libbutl/utility.hxx>
//
+ using butl::utf8;
using butl::icasecmp;
using butl::reverse_iterate;
}
diff --git a/libbrep/version.hxx.in b/libbrep/version.hxx.in
index 00fb4ff..3ac3752 100644
--- a/libbrep/version.hxx.in
+++ b/libbrep/version.hxx.in
@@ -1,5 +1,4 @@
// file : libbrep/version.hxx.in -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef BREP_VERSION // Note: using the version macro itself.
diff --git a/libbrep/wrapper-traits.hxx b/libbrep/wrapper-traits.hxx
index 54c5ef1..8c9d830 100644
--- a/libbrep/wrapper-traits.hxx
+++ b/libbrep/wrapper-traits.hxx
@@ -1,5 +1,4 @@
// file : libbrep/wrapper-traits.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef LIBBREP_WRAPPER_TRAITS_HXX
@@ -7,7 +6,7 @@
#include <odb/pre.hxx>
-#include <libbutl/optional.mxx>
+#include <libbutl/optional.hxx>
#include <odb/wrapper-traits.hxx>
diff --git a/load/buildfile b/load/buildfile
index 493d067..4278f20 100644
--- a/load/buildfile
+++ b/load/buildfile
@@ -1,5 +1,4 @@
# file : load/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
import libs = libodb%lib{odb}
@@ -12,6 +11,10 @@ include ../libbrep/
exe{brep-load}: {hxx ixx cxx}{* -load-options} {hxx ixx cxx}{load-options} \
../libbrep/lib{brep} $libs
+# Build options.
+#
+obj{load}: cxx.poptions += -DBREP_COPYRIGHT=\"$copyright\"
+
# Generated options parser.
#
if $cli.configured
@@ -20,8 +23,8 @@ if $cli.configured
cli.options += --std c++11 -I $src_root --include-with-brackets \
--include-prefix load --guard-prefix LOAD --generate-specifier \
---cxx-prologue "#include <load/types-parsers.hxx>" --page-usage print_ \
---ansi-color --long-usage
+--generate-modifier --cxx-prologue "#include <load/types-parsers.hxx>" \
+--page-usage print_ --ansi-color --long-usage
# Include the generated cli files into the distribution and don't remove
# them when cleaning in src (so that clean results in a state identical to
diff --git a/load/load.cli b/load/load.cli
index 74c91f2..99d76f6 100644
--- a/load/load.cli
+++ b/load/load.cli
@@ -1,5 +1,4 @@
// file : load/load.cli
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
include <vector>;
@@ -54,9 +53,18 @@ class options
bool --shallow
{
"Don't load package information from prerequisite or complement
- repositories."
+ repositories, don't fail if unable to resolve a package dependency, and
+ don't detect package dependency cycles."
};
+ bool --ignore-unresolved-tests
+ {
+ "Ignore tests, examples, and benchmarks package manifest entries which
+ cannot be resolved from the main package's complement repositories,
+ recursively. Note that in contrast to --shallow option, such entries will
+ be removed from the main package manifests outright."
+ }
+
std::string --tenant
{
"<id>",
@@ -64,6 +72,41 @@ class options
specified, then the single-tenant mode is assumed."
};
+ bool --private
+ {
+ "Display the tenant packages in the web interface only in the tenant view
+ mode."
+ };
+
+ std::string --interactive
+ {
+ "<bkp>",
+ "Build the tenant packages interactively, stopping builds at the specified
+ breakpoint. Implies \cb{--private}."
+ };
+
+ std::string --service-id
+ {
+ "<id>",
+ "Third party service information to associate with the being created
+ tenant. Requires the \cb{--tenant} and \cb{--service-type} options to be
+ specified."
+ };
+
+ std::string --service-type
+ {
+ "<type>",
+ "Type of the service to associate with the being created tenant. Requires
+ the \cb{--service-id} option to be specified."
+ };
+
+ std::string --service-data
+ {
+ "<data>",
+ "Service data to associate with the being created tenant. Requires the
+ \cb{--service-id} option to be specified."
+ };
+
brep::path --overrides-file
{
"<file>",
@@ -124,6 +167,22 @@ class options
this option to specify multiple package manager options."
}
+ brep::path openssl = "openssl"
+ {
+ "<path>",
+ "The openssl program to be used for crypto operations. You can also
+ specify additional options that should be passed to the openssl program
+ with \cb{openssl-option}. If the openssl program is not explicitly
+ specified, then \cb{brep-load} will use \cb{openssl} by default."
+ }
+
+ brep::strings openssl-option
+ {
+ "<opt>",
+ "Additional option to be passed to the openssl program (see \cb{openssl}
+ for details). Repeat this option to specify multiple openssl options."
+ }
+
std::string --pager // String to allow empty value.
{
"<path>",
@@ -162,8 +221,8 @@ Fatal error.|
\li|\cb{2}
-An instance of \cb{brep-load} or \l{brep-migrate(1)} is already running. Try
-again.|
+An instance of \cb{brep-load} or some other \cb{brep} utility is already
+running. Try again.|
\li|\cb{3}
diff --git a/load/load.cxx b/load/load.cxx
index 83cc9e6..474b443 100644
--- a/load/load.cxx
+++ b/load/load.cxx
@@ -1,13 +1,13 @@
// file : load/load.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <signal.h> // signal()
#include <cerrno>
-#include <cstring> // strncmp()
+#include <chrono>
+#include <thread> // this_thread::sleep_for()
+#include <cstring> // strncmp()
#include <iostream>
-#include <algorithm> // find(), find_if()
#include <odb/session.hxx>
#include <odb/database.hxx>
@@ -17,13 +17,14 @@
#include <odb/pgsql/database.hxx>
-#include <libbutl/pager.mxx>
-#include <libbutl/sha256.mxx>
-#include <libbutl/process.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/tab-parser.mxx>
-#include <libbutl/manifest-parser.mxx>
+#include <libbutl/pager.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/process.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/tab-parser.hxx>
+#include <libbutl/manifest-parser.hxx>
#include <libbpkg/manifest.hxx>
@@ -37,6 +38,7 @@ using std::cout;
using std::cerr;
using std::endl;
+using namespace std::this_thread;
using namespace odb::core;
using namespace butl;
using namespace bpkg;
@@ -54,6 +56,17 @@ static const char* help_info (
static const path packages ("packages.manifest");
static const path repositories ("repositories.manifest");
+// Retry executing bpkg on recoverable errors for about 10 seconds.
+//
+// Should we just exit with some "bpkg recoverable" code instead and leave it
+// to the caller to perform retries? Feels like it's better to handle such
+// errors ourselves rather than to complicate every caller. Note that having
+// some frequently updated prerequisite repository can make these errors quite
+// probable, even if the internal repositories are rarely updated.
+//
+static const size_t bpkg_retries (10);
+static const std::chrono::seconds bpkg_retry_timeout (1);
+
struct internal_repository
{
repository_location location;
@@ -249,7 +262,7 @@ load_repositories (path p)
bad_line ("invalid buildable option value");
}
else
- bad_line ("invalid option '" + nv + "'");
+ bad_line ("invalid option '" + nv + '\'');
}
// For now cache option is mandatory.
@@ -352,23 +365,22 @@ repository_info (const options& lo, const string& rl, const cstrings& options)
// the repository. Should be called once per repository.
//
static void
-load_packages (const shared_ptr<repository>& rp,
+load_packages (const options& lo,
+ const shared_ptr<repository>& rp,
+ const repository_location& cl,
database& db,
bool ignore_unknown,
- const manifest_name_values& overrides)
+ const manifest_name_values& overrides,
+ const string& overrides_name)
{
// packages_timestamp other than timestamp_nonexistent signals the
// repository packages are already loaded.
//
assert (rp->packages_timestamp == timestamp_nonexistent);
- // Only locally accessible repositories allowed until package manager API is
- // ready.
- //
- assert (!rp->cache_location.empty ());
-
vector<package_manifest> pms;
- const repository_location& cl (rp->cache_location);
+
+ assert (!cl.empty ());
path p (cl.path () / packages);
@@ -395,8 +407,8 @@ load_packages (const shared_ptr<repository>& rp,
mp,
move (nv),
ignore_unknown,
- false /* complete_depends */,
- package_manifest_flags::forbid_incomplete_dependencies);
+ false /* complete_values */,
+ package_manifest_flags::forbid_incomplete_values);
}
else
pms = pkg_package_manifests (mp, ignore_unknown);
@@ -408,11 +420,15 @@ load_packages (const shared_ptr<repository>& rp,
}
using brep::dependency;
+ using brep::dependency_alternative;
+ using brep::dependency_alternatives;
+
+ const string& tenant (rp->tenant);
for (package_manifest& pm: pms)
{
shared_ptr<package> p (
- db.find<package> (package_id (rp->tenant, pm.name, pm.version)));
+ db.find<package> (package_id (tenant, pm.name, pm.version)));
// sha256sum should always be present if the package manifest comes from
// the packages.manifest file belonging to the pkg repository.
@@ -421,115 +437,296 @@ load_packages (const shared_ptr<repository>& rp,
if (p == nullptr)
{
- if (rp->internal)
+ // Apply the package manifest overrides.
+ //
+ if (!overrides.empty ())
+ try
{
- try
- {
- pm.override (overrides, "" /* name */);
- }
- catch (const manifest_parsing&)
+ pm.override (overrides, overrides_name);
+ }
+ catch (const manifest_parsing& e)
+ {
+ cerr << "error: unable to override " << pm.name << ' ' << pm.version
+ << " manifest: " << e << endl;
+
+ throw failed ();
+ }
+
+ // Convert the package manifest build configurations (contain public
+ // keys data) into the brep's build package configurations (contain
+ // public key object lazy pointers). Keep the bot key lists empty if
+ // the package is not buildable.
+ //
+ package_build_configs build_configs;
+
+ if (!pm.build_configs.empty ())
+ {
+ build_configs.reserve (pm.build_configs.size ());
+
+ for (bpkg::build_package_config& c: pm.build_configs)
{
- // Overrides are already validated (see below).
- //
- assert (false);
+ build_configs.emplace_back (move (c.name),
+ move (c.arguments),
+ move (c.comment),
+ move (c.builds),
+ move (c.constraints),
+ move (c.auxiliaries),
+ package_build_bot_keys (),
+ move (c.email),
+ move (c.warning_email),
+ move (c.error_email));
}
+ }
+ if (rp->internal)
+ {
// Create internal package object.
//
- optional<string> dsc;
- optional<text_type> dst;
-
- if (pm.description)
+ // Return nullopt if the text is in a file (can happen if the
+ // repository is of a type other than pkg) or if the type is not
+ // recognized (can only happen in the "ignore unknown" mode).
+ //
+ auto to_typed_text = [&cl, ignore_unknown] (typed_text_file&& v)
{
+ optional<typed_text> r;
+
// The description value should not be of the file type if the
// package manifest comes from the pkg repository.
//
- assert (!pm.description->file || cl.type () != repository_type::pkg);
+ assert (!v.file || cl.type () != repository_type::pkg);
- if (!pm.description->file)
+ if (!v.file)
{
- dst = pm.effective_description_type (ignore_unknown);
+ // Cannot throw since the manifest parser has already verified the
+ // effective type in the same "ignore unknown" mode.
+ //
+ optional<text_type> t (v.effective_type (ignore_unknown));
// If the description type is unknown (which may be the case for
// some "transitional" period and only if --ignore-unknown is
// specified) we just silently drop the description.
//
- assert (dst || ignore_unknown);
+ assert (t || ignore_unknown);
- if (dst)
- dsc = move (pm.description->text);
+ if (t)
+ r = typed_text {move (v.text), *t};
}
- }
- string chn;
+ return r;
+ };
+
+ // Convert descriptions.
+ //
+ optional<typed_text> ds (
+ pm.description
+ ? to_typed_text (move (*pm.description))
+ : optional<typed_text> ());
+
+ optional<typed_text> pds (
+ pm.package_description
+ ? to_typed_text (move (*pm.package_description))
+ : optional<typed_text> ());
+
+ // Merge changes into a single typed text object.
+ //
+ // If the text type is not recognized for any changes entry or some
+ // entry refers to a file, then assume that no changes are specified.
+ //
+ optional<typed_text> chn;
+
for (auto& c: pm.changes)
{
- // The changes value should not be of the file type if the package
- // manifest comes from the pkg repository.
- //
- assert (!c.file || cl.type () != repository_type::pkg);
+ optional<typed_text> tc (to_typed_text (move (c)));
- if (!c.file)
+ if (!tc)
{
- if (chn.empty ())
- chn = move (c.text);
- else
- {
- if (chn.back () != '\n')
- chn += '\n'; // Always have a blank line as a separator.
-
- chn += "\n" + c.text;
- }
+ chn = nullopt;
+ break;
}
- }
- dependencies ds;
-
- for (auto& pda: pm.dependencies)
- {
- // Ignore special build2 and bpkg dependencies. We may not have
- // packages for them and also showing them for every package is
- // probably not very helpful.
- //
- if (pda.buildtime && !pda.empty ())
+ if (!chn)
{
- const package_name& n (pda.front ().name);
- if (n == "build2" || n == "bpkg")
- continue;
+ chn = move (*tc);
}
+ else
+ {
+ // Should have failed while parsing the manifest otherwise.
+ //
+ assert (tc->type == chn->type);
- ds.emplace_back (pda.conditional, pda.buildtime, move (pda.comment));
+ string& v (chn->text);
- for (auto& pd: pda)
- // The package member will be assigned during dependency
- // resolution procedure.
- //
- ds.back ().push_back (dependency {move (pd.name),
- move (pd.constraint),
- nullptr /* package */});
+ assert (!v.empty ()); // Changes manifest value cannot be empty.
+
+ if (v.back () != '\n')
+ v += '\n'; // Always have a blank line as a separator.
+
+ v += '\n';
+ v += tc->text;
+ }
}
- auto deps = [] (small_vector<bpkg::dependency, 1>&& ds)
+ dependencies tds;
+
+ for (auto& das: pm.dependencies)
{
- small_vector<dependency, 1> r;
+ dependency_alternatives tdas (das.buildtime, move (das.comment));
- if (!ds.empty ())
+ for (auto& da: das)
{
- r.reserve (ds.size ());
+ dependency_alternative tda (move (da.enable),
+ move (da.reflect),
+ move (da.prefer),
+ move (da.accept),
+ move (da.require));
+
+ for (auto& d: da)
+ {
+ package_name& n (d.name);
+
+ // Ignore special build2 and bpkg dependencies. We may not have
+ // packages for them and also showing them for every package is
+ // probably not very helpful.
+ //
+ if (das.buildtime && (n == "build2" || n == "bpkg"))
+ continue;
+
+ // The package member will be assigned during dependency
+ // resolution procedure.
+ //
+ tda.push_back (dependency {move (n),
+ move (d.constraint),
+ nullptr /* package */});
+ }
- for (bpkg::dependency& d: ds)
- r.push_back (dependency {move (d.name),
- move (d.constraint),
- nullptr /* package */});
+ if (!tda.empty ())
+ tdas.push_back (move (tda));
}
- return r;
- };
+ if (!tdas.empty ())
+ tds.push_back (move (tdas));
+ }
+
+ small_vector<brep::test_dependency, 1> ts;
+
+ if (!pm.tests.empty ())
+ {
+ ts.reserve (pm.tests.size ());
+
+ for (bpkg::test_dependency& td: pm.tests)
+ ts.emplace_back (move (td.name),
+ td.type,
+ td.buildtime,
+ move (td.constraint),
+ move (td.enable),
+ move (td.reflect));
+ }
// Cache before the package name is moved.
//
package_name project (pm.effective_project ());
+ // If the package is buildable, then save the package manifest's
+ // common and build configuration-specific bot keys into the database
+ // and translate the key data lists into the lists of the public key
+ // object lazy pointers.
+ //
+ package_build_bot_keys bot_keys;
+
+ if (rp->buildable)
+ {
+ // Save the specified bot keys into the database as public key
+ // objects, unless they are already persisted. Translate these keys
+ // into the public key object lazy pointers.
+ //
+ auto keys_to_objects = [&lo,
+ &pm,
+ &tenant,
+ &db] (strings&& keys)
+ {
+ package_build_bot_keys r;
+
+ if (keys.empty ())
+ return r;
+
+ r.reserve (keys.size ());
+
+ for (string& key: keys)
+ {
+ // Calculate the key fingerprint.
+ //
+ string fp;
+
+ try
+ {
+ openssl os (path ("-"), path ("-"), 2,
+ lo.openssl (),
+ "pkey",
+ lo.openssl_option (), "-pubin", "-outform", "DER");
+
+ os.out << key;
+ os.out.close ();
+
+ fp = sha256 (os.in).string ();
+ os.in.close ();
+
+ if (!os.wait ())
+ {
+ cerr << "process " << lo.openssl () << ' ' << *os.exit
+ << endl;
+
+ throw io_error ("");
+ }
+ }
+ catch (const io_error&)
+ {
+ cerr << "error: unable to convert custom build bot public key "
+ << "for package " << pm.name << ' ' << pm.version << endl
+ << " info: key:" << endl
+ << key << endl;
+
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ cerr << "error: unable to convert custom build bot public key "
+ << "for package " << pm.name << ' ' << pm.version << ": "
+ << e << endl;
+
+ throw failed ();
+ }
+
+ // Try to find the public_key object for the calculated
+ // fingerprint. If it doesn't exist, then create and persist the
+ // new object.
+ //
+ public_key_id id (tenant, move (fp));
+ shared_ptr<public_key> k (db.find<public_key> (id));
+
+ if (k == nullptr)
+ {
+ k = make_shared<public_key> (move (id.tenant),
+ move (id.fingerprint),
+ move (key));
+
+ db.persist (k);
+ }
+
+ r.push_back (move (k));
+ }
+
+ return r;
+ };
+
+ bot_keys = keys_to_objects (move (pm.build_bot_keys));
+
+ assert (build_configs.size () == pm.build_configs.size ());
+
+ for (size_t i (0); i != build_configs.size (); ++i)
+ build_configs[i].bot_keys =
+ keys_to_objects (move (pm.build_configs[i].bot_keys));
+ }
+
p = make_shared<package> (
move (pm.name),
move (pm.version),
@@ -540,8 +737,8 @@ load_packages (const shared_ptr<repository>& rp,
move (pm.license_alternatives),
move (pm.topics),
move (pm.keywords),
- move (dsc),
- move (dst),
+ move (ds),
+ move (pds),
move (chn),
move (pm.url),
move (pm.doc_url),
@@ -552,13 +749,14 @@ load_packages (const shared_ptr<repository>& rp,
move (pm.build_email),
move (pm.build_warning_email),
move (pm.build_error_email),
- move (ds),
+ move (tds),
move (pm.requirements),
- deps (move (pm.tests)),
- deps (move (pm.examples)),
- deps (move (pm.benchmarks)),
+ move (ts),
move (pm.builds),
move (pm.build_constraints),
+ move (pm.build_auxiliaries),
+ move (bot_keys),
+ move (build_configs),
move (pm.location),
move (pm.fragment),
move (pm.sha256sum),
@@ -567,7 +765,13 @@ load_packages (const shared_ptr<repository>& rp,
else
// Create external package object.
//
- p = make_shared<package> (move (pm.name), move (pm.version), rp);
+ p = make_shared<package> (move (pm.name),
+ move (pm.version),
+ move (pm.builds),
+ move (pm.build_constraints),
+ move (pm.build_auxiliaries),
+ move (build_configs),
+ rp);
db.persist (p);
}
@@ -601,9 +805,14 @@ load_packages (const shared_ptr<repository>& rp,
// A non-stub package is buildable if belongs to at least one
// buildable repository (see libbrep/package.hxx for details).
+ // Note that if this is an external test package it will be marked as
+ // unbuildable later (see resolve_dependencies() for details).
//
- if (!p->stub () && !p->buildable)
- p->buildable = rp->buildable;
+ if (rp->buildable && !p->buildable && !p->stub ())
+ {
+ p->buildable = true;
+ p->unbuildable_reason = nullopt;
+ }
}
p->other_repositories.push_back (rp);
@@ -620,7 +829,9 @@ load_packages (const shared_ptr<repository>& rp,
// changed members. Should be called once per persisted internal repository.
//
static void
-load_repositories (const shared_ptr<repository>& rp,
+load_repositories (const options& lo,
+ const shared_ptr<repository>& rp,
+ const repository_location& cl,
database& db,
bool ignore_unknown,
bool shallow)
@@ -630,11 +841,6 @@ load_repositories (const shared_ptr<repository>& rp,
//
assert (rp->repositories_timestamp == timestamp_nonexistent);
- // Only locally accessible repositories allowed until package manager API is
- // ready.
- //
- assert (!rp->cache_location.empty ());
-
const string& tenant (rp->tenant);
// Repository is already persisted by the load_packages() function call.
@@ -644,7 +850,9 @@ load_repositories (const shared_ptr<repository>& rp,
pkg_repository_manifests rpm;
- path p (rp->cache_location.path () / repositories);
+ assert (!cl.empty ());
+
+ path p (cl.path () / repositories);
try
{
@@ -653,6 +861,9 @@ load_repositories (const shared_ptr<repository>& rp,
manifest_parser mp (ifs, p.string ());
rpm = pkg_repository_manifests (mp, ignore_unknown);
+
+ if (rpm.empty ())
+ rpm.emplace_back (repository_manifest ()); // Add the base repository.
}
catch (const io_error& e)
{
@@ -779,15 +990,19 @@ load_repositories (const shared_ptr<repository>& rp,
pr = make_shared<repository> (tenant, move (rl));
- // If the prerequsite repository location is a relative path, then
- // calculate its cache location.
+ // If the base repository is internal and the prerequisite repository
+ // location is a relative path, then calculate its cache location.
//
- if (rm.location.relative ())
+ if (rp->internal && rm.location.relative ())
{
+ // For an internal repository the cache location always comes from the
+ // loadtab file.
+ //
+ assert (cl.path () == rp->cache_location.path ());
+
try
{
- pr->cache_location =
- repository_location (rm.location, rp->cache_location);
+ pr->cache_location = repository_location (rm.location, cl);
}
catch (const invalid_argument&)
{
@@ -795,21 +1010,162 @@ load_repositories (const shared_ptr<repository>& rp,
<< "repository '" << rm.location << "'" << endl
<< " info: base (internal) repository location is "
<< rp->location << endl
- << " info: base repository cache location is "
- << rp->cache_location << endl;
+ << " info: base repository cache location is " << cl << endl;
throw failed ();
}
}
+ // If the (external) prerequisite repository cache location is empty, then
+ // check if the repository is local and, if that's the case, use its
+ // location as a cache location. Otherwise, fetch the repository
+ // information creating a temporary cache for it.
+ //
+ auto_rmdir cdr; // Remove the temporary cache after the repo load.
+ repository_location cl; // Repository temporary cache location.
+
+ if (pr->cache_location.empty ())
+ {
+ if (pr->location.local ())
+ {
+ pr->cache_location = pr->location;
+ }
+ else
+ {
+ dir_path cd;
+
+ try
+ {
+ cd = dir_path::temp_path ("brep-load-cache");
+ }
+ catch (const system_error& e)
+ {
+ cerr << "unable to obtain temporary directory: " << e;
+ throw failed ();
+ }
+
+ // It's highly unlikely but still possible that the temporary cache
+ // directory already exists. This can only happen due to the unclean
+ // loader termination. Let's remove it and retry.
+ //
+ try
+ {
+ if (try_mkdir (cd) == mkdir_status::already_exists)
+ {
+ try_rmdir_r (cd);
+
+ if (try_mkdir (cd) == mkdir_status::already_exists)
+ throw_generic_error (EEXIST);
+ }
+ }
+ catch (const system_error& e)
+ {
+ cerr << "unable to create directory '" << cd << "': " << e;
+ throw failed ();
+ }
+
+ cdr = auto_rmdir (cd);
+
+ path rf (cd / repositories);
+ path pf (cd / packages);
+
+ // Note that the fetch timeout can be overridden via --bpkg-option.
+ //
+ cstrings args {
+ "--fetch-timeout", "60", // 1 minute.
+ "--deep",
+ "--manifest",
+ "--repositories",
+ "--repositories-file", rf.string ().c_str (),
+ "--packages",
+ "--packages-file", pf.string ().c_str ()};
+
+ if (rm.trust)
+ {
+ args.push_back ("--trust");
+ args.push_back (rm.trust->c_str ());
+ }
+
+ // Always add it, so bpkg won't try to prompt for a certificate
+ // authentication if the fingerprint doesn't match.
+ //
+ args.push_back ("--trust-no");
+
+ // Retry bpkg-rep-info on recoverable errors, for a while.
+ //
+ for (size_t i (0);; ++i)
+ {
+ if (i != 0)
+ {
+ // Let's follow up the bpkg's diagnostics with the number of
+ // retries left.
+ //
+ cerr << bpkg_retries - i + 1 << " retries left" << endl;
+ sleep_for (bpkg_retry_timeout);
+ }
+
+ process p (repository_info (lo, pr->location.string (), args));
+
+ try
+ {
+ // Bail out from the retry loop on success.
+ //
+ if (p.wait ())
+ break;
+
+ // Assume the child issued diagnostics if terminated normally.
+ //
+ if (p.exit->normal ())
+ {
+ // Retry the manifests fetch on a recoverable error, unless the
+ // retries limit is reached.
+ //
+ if (p.exit->code () == 2 && i != bpkg_retries)
+ continue;
+ }
+ else
+ cerr << "process " << lo.bpkg () << " " << *p.exit << endl;
+
+ cerr << "error: unable to fetch manifests for "
+ << pr->canonical_name << endl
+ << " info: base repository location is "
+ << rp->location << endl;
+
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ cerr << "error: unable to fetch manifests for "
+ << pr->canonical_name << ": " << e << endl;
+
+ throw failed ();
+ }
+ }
+
+ // Note that this is a non-pkg repository cache and so we create the
+ // dir repository location (see load_repositories(path) for details).
+ //
+ cl = repository_location (repository_url (cd.string ()),
+ repository_type::dir);
+ }
+ }
+
// We don't apply overrides to the external packages.
//
- load_packages (pr,
+ load_packages (lo,
+ pr,
+ !pr->cache_location.empty () ? pr->cache_location : cl,
db,
ignore_unknown,
- manifest_name_values () /* overrides */);
-
- load_repositories (pr, db, ignore_unknown, false /* shallow */);
+ manifest_name_values () /* overrides */,
+ "" /* overrides_name */);
+
+ load_repositories (lo,
+ pr,
+ !pr->cache_location.empty () ? pr->cache_location : cl,
+ db,
+ ignore_unknown,
+ false /* shallow */);
}
db.update (rp);
@@ -850,29 +1206,36 @@ find (const lazy_shared_ptr<repository>& r,
return false;
}
-// Resolve package run-time dependencies, tests, examples, and benchmarks.
-// Make sure that the best matching dependency belongs to the package
-// repositories, their complements, recursively, or their immediate
-// prerequisite repositories (only for run-time dependencies). Should be
-// called once per internal package.
+// Resolve package regular dependencies and external tests. Make sure that the
+// best matching dependency belongs to the package repositories, their
+// complements, recursively, or their immediate prerequisite repositories
+// (only for regular dependencies). Set the buildable flag to false for the
+// resolved external tests packages. Fail if unable to resolve a regular
+// dependency, unless ignore_unresolved is true in which case leave this
+// dependency NULL. Fail if unable to resolve an external test, unless
+// ignore_unresolved or ignore_unresolved_tests is true in which case leave
+// this dependency NULL, if ignore_unresolved_tests is false, and remove the
+// respective tests manifest entry otherwise. Should be called once per
+// internal package.
//
static void
-resolve_dependencies (package& p, database& db)
+resolve_dependencies (package& p,
+ database& db,
+ bool ignore_unresolved,
+ bool ignore_unresolved_tests)
{
using brep::dependency;
+ using brep::dependency_alternative;
using brep::dependency_alternatives;
// Resolve dependencies for internal packages only.
//
assert (p.internal ());
- if (p.dependencies.empty () &&
- p.tests.empty () &&
- p.examples.empty () &&
- p.benchmarks.empty ())
+ if (p.dependencies.empty () && p.tests.empty ())
return;
- auto resolve = [&p, &db] (dependency& d, bool prereq)
+ auto resolve = [&p, &db] (dependency& d, bool test)
{
// Dependency should not be resolved yet.
//
@@ -934,9 +1297,26 @@ resolve_dependencies (package& p, database& db)
for (const auto& pp: db.query<package> (q + order_by_version_desc (vm)))
{
- if (find (p.internal_repository, pp, prereq))
+ if (find (p.internal_repository, pp, !test))
{
d.package.reset (db, pp.id);
+
+ // If the resolved dependency is an external test, then mark it as
+ // such, unless it is a stub.
+ //
+ if (test)
+ {
+ shared_ptr<package> dp (d.package.load ());
+
+ if (!dp->stub ())
+ {
+ dp->buildable = false;
+ dp->unbuildable_reason = unbuildable_reason::test;
+
+ db.update (dp);
+ }
+ }
+
return true;
}
}
@@ -944,49 +1324,50 @@ resolve_dependencies (package& p, database& db)
return false;
};
- auto bail = [&p] (const dependency& d, const char* what)
+ auto bail = [&p] (const dependency& d, const string& what)
{
- cerr << "error: can't resolve " << what << " " << d << " for the package "
- << p.name << " " << p.version << endl
+ cerr << "error: can't resolve " << what << ' ' << d << " for the package "
+ << p.name << ' ' << p.version << endl
<< " info: repository " << p.internal_repository.load ()->location
<< " appears to be broken" << endl;
throw failed ();
};
- for (dependency_alternatives& da: p.dependencies)
+ for (dependency_alternatives& das: p.dependencies)
{
- for (dependency& d: da)
+ // Practically it is enough to resolve at least one dependency alternative
+ // to build a package. However, we consider it an error to specify in
+ // the manifest file an alternative which can't be resolved, unless
+ // unresolved dependencies are allowed.
+ //
+ for (dependency_alternative& da: das)
{
- // Practically it is enough to resolve at least one dependency
- // alternative to build a package. Meanwhile here we consider an error
- // specifying in the manifest file an alternative which can't be
- // resolved.
- //
- if (!resolve (d, true /* prereq */))
- bail (d, "dependency");
+ for (dependency& d: da)
+ {
+ if (!resolve (d, false /* test */) && !ignore_unresolved)
+ bail (d, "dependency");
+ }
}
}
- // Should we allow tests, examples, and benchmarks packages to be
- // unresolvable? Let's forbid that until we see a use case for that.
- //
- for (dependency& d: p.tests)
+ for (auto i (p.tests.begin ()); i != p.tests.end (); )
{
- if (!resolve (d, false /* prereq */))
- bail (d, "tests");
- }
+ brep::test_dependency& td (*i);
- for (dependency& d: p.examples)
- {
- if (!resolve (d, false /* prereq */))
- bail (d, "examples");
- }
+ if (!resolve (td, true /* test */))
+ {
+ if (!ignore_unresolved && !ignore_unresolved_tests)
+ bail (td, to_string (td.type));
- for (dependency& d: p.benchmarks)
- {
- if (!resolve (d, false /* prereq */))
- bail (d, "benchmarks");
+ if (ignore_unresolved_tests)
+ {
+ i = p.tests.erase (i);
+ continue;
+ }
+ }
+
+ ++i;
}
db.update (p); // Update the package state.
@@ -1043,10 +1424,13 @@ detect_dependency_cycle (const package_id& id,
chain.push_back (id);
shared_ptr<package> p (db.load<package> (id));
- for (const auto& da: p->dependencies)
+ for (const auto& das: p->dependencies)
{
- for (const auto& d: da)
- detect_dependency_cycle (d.package.object_id (), chain, db);
+ for (const auto& da: das)
+ {
+ for (const auto& d: da)
+ detect_dependency_cycle (d.package.object_id (), chain, db);
+ }
}
chain.pop_back ();
@@ -1064,105 +1448,130 @@ certificate_info (const options& lo,
const repository_location& rl,
const optional<string>& fp)
{
- try
- {
- cstrings args {
- "--cert-fingerprint",
- "--cert-name",
- "--cert-organization",
- "--cert-email",
- "-q"}; // Don't print info messages.
+ cstrings args {
+ "--cert-fingerprint",
+ "--cert-name",
+ "--cert-organization",
+ "--cert-email",
+ "-q"}; // Don't print info messages.
- const char* trust ("--trust-no");
+ const char* trust ("--trust-no");
- if (fp)
+ if (fp)
+ {
+ if (!fp->empty ())
{
- if (!fp->empty ())
- {
- args.push_back ("--trust");
- args.push_back (fp->c_str ());
- }
- else
- trust = "--trust-yes";
+ args.push_back ("--trust");
+ args.push_back (fp->c_str ());
+ }
+ else
+ trust = "--trust-yes";
- if (!rl.remote ())
- {
- args.push_back ("--auth");
- args.push_back ("all");
- }
+ if (!rl.remote ())
+ {
+ args.push_back ("--auth");
+ args.push_back ("all");
}
+ }
- args.push_back (trust);
+ args.push_back (trust);
- process pr (repository_info (lo, rl.string (), args));
+ // Retry bpkg-rep-info on recoverable errors, for a while.
+ //
+ for (size_t i (0);; ++i)
+ {
+ if (i != 0)
+ {
+ // Let's follow up the bpkg's diagnostics with the number of retries
+ // left.
+ //
+ cerr << bpkg_retries - i + 1 << " retries left" << endl;
+ sleep_for (bpkg_retry_timeout);
+ }
try
{
- ifdstream is (
- move (pr.in_ofd),
- ifdstream::failbit | ifdstream::badbit | ifdstream::eofbit);
+ process pr (repository_info (lo, rl.string (), args));
- optional<certificate> cert;
+ try
+ {
+ ifdstream is (
+ move (pr.in_ofd),
+ ifdstream::failbit | ifdstream::badbit | ifdstream::eofbit);
- string fingerprint;
- getline (is, fingerprint);
+ optional<certificate> cert;
- if (!fingerprint.empty ())
- {
- cert = certificate ();
- cert->fingerprint = move (fingerprint);
- getline (is, cert->name);
- getline (is, cert->organization);
- getline (is, cert->email);
+ string fingerprint;
+ getline (is, fingerprint);
+
+ if (!fingerprint.empty ())
+ {
+ cert = certificate ();
+ cert->fingerprint = move (fingerprint);
+ getline (is, cert->name);
+ getline (is, cert->organization);
+ getline (is, cert->email);
+ }
+ else
+ {
+ // Read out empty lines.
+ //
+ string s;
+ getline (is, s); // Name.
+ getline (is, s); // Organization.
+ getline (is, s); // Email.
+ }
+
+ // Check that EOF is successfully reached.
+ //
+ is.exceptions (ifdstream::failbit | ifdstream::badbit);
+ if (is.peek () != ifdstream::traits_type::eof ())
+ throw io_error ("");
+
+ is.close ();
+
+ if (pr.wait ())
+ return cert;
+
+ // Fall through.
+ //
}
- else
+ catch (const io_error&)
{
- // Read out empty lines.
+ // Child exit status doesn't matter. Just wait for the process
+ // completion and fall through.
//
- string s;
- getline (is, s); // Name.
- getline (is, s); // Organization.
- getline (is, s); // Email.
+ pr.wait ();
}
- // Check that EOF is successfully reached.
+ // Assume the child issued diagnostics if terminated normally.
//
- is.exceptions (ifdstream::failbit | ifdstream::badbit);
- if (is.peek () != ifdstream::traits_type::eof ())
- throw io_error ("");
-
- is.close ();
+ if (pr.exit->normal ())
+ {
+ // Retry the certificate fetch on a recoverable error, unless the
+ // retries limit is reached.
+ //
+ if (pr.exit->code () == 2 && i != bpkg_retries)
+ continue;
+ }
+ else
+ cerr << "process " << lo.bpkg () << " " << *pr.exit << endl;
- if (pr.wait ())
- return cert;
+ cerr << "error: unable to fetch certificate information for "
+ << rl.canonical_name () << endl;
// Fall through.
- //
}
- catch (const io_error&)
+ catch (const process_error& e)
{
- // Child exit status doesn't matter. Just wait for the process
- // completion and fall through.
- //
- pr.wait ();
- }
-
- // Assume the child issued diagnostics.
- //
- cerr << "error: unable to fetch certificate information for "
- << rl.canonical_name () << endl;
+ cerr << "error: unable to fetch certificate information for "
+ << rl.canonical_name () << ": " << e << endl;
- // Fall through.
- }
- catch (const process_error& e)
- {
- cerr << "error: unable to fetch certificate information for "
- << rl.canonical_name () << ": " << e << endl;
+ // Fall through.
+ }
- // Fall through.
+ throw failed ();
}
-
- throw failed ();
}
int
@@ -1193,7 +1602,7 @@ try
<< "libbbot " << LIBBBOT_VERSION_ID << endl
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
- << "Copyright (c) 2014-2019 Code Synthesis Ltd" << endl
+ << "Copyright (c) " << BREP_COPYRIGHT << "." << endl
<< "This is free software released under the MIT license." << endl;
return 0;
@@ -1241,8 +1650,46 @@ try
throw failed ();
}
+ // Verify the --service-* options.
+ //
+ if (ops.service_id_specified ())
+ {
+ if (!ops.tenant_specified ())
+ {
+ cerr << "error: --service-id requires --tenant" << endl;
+ throw failed ();
+ }
+
+ if (ops.service_type ().empty ())
+ {
+ cerr << "error: --service-id requires --service-type"
+ << endl;
+ throw failed ();
+ }
+ }
+ else
+ {
+ if (ops.service_type_specified ())
+ {
+ cerr << "error: --service-type requires --service-id"
+ << endl;
+ throw failed ();
+ }
+
+ if (ops.service_data_specified ())
+ {
+ cerr << "error: --service-data requires --service-id"
+ << endl;
+ throw failed ();
+ }
+ }
+
// Parse and validate overrides, if specified.
//
+ // Note that here we make sure that the overrides manifest is valid.
+ // Applying overrides to a specific package manifest may still fail (see
+ // package_manifest::validate_overrides() for details).
+ //
manifest_name_values overrides;
if (ops.overrides_file_specified ())
@@ -1277,7 +1724,7 @@ try
ops.db_port (),
"options='-c default_transaction_isolation=serializable'");
- // Prevent several brep-load/migrate instances from updating DB
+ // Prevent several brep utility instances from updating the package database
// simultaneously.
//
database_lock l (db);
@@ -1294,6 +1741,11 @@ try
throw failed ();
}
+ // Note: the interactive tenant implies private.
+ //
+ if (ops.interactive_specified ())
+ ops.private_ (true);
+
// Load the description of all the internal repositories from the
// configuration file.
//
@@ -1313,6 +1765,7 @@ try
{
db.erase_query<package> ();
db.erase_query<repository> ();
+ db.erase_query<public_key> ();
db.erase_query<tenant> ();
}
else // Multi-tenant mode.
@@ -1325,13 +1778,39 @@ try
db.erase_query<repository> (
query<repository>::id.tenant.in_range (ts.begin (), ts.end ()));
+ db.erase_query<public_key> (
+ query<public_key>::id.tenant.in_range (ts.begin (), ts.end ()));
+
db.erase_query<tenant> (
query<tenant>::id.in_range (ts.begin (), ts.end ()));
}
// Persist the tenant.
//
- db.persist (tenant (tnt));
+ // Note that if the tenant service is specified and some tenant with the
+ // same service id and type is already persisted, then we will end up with
+ // the `object already persistent` error and terminate with the exit code
+ // 1 (fatal error). We could potentially dedicate a special exit code for
+ // such a case, so that the caller may recognize it and behave accordingly
+ // (CI request handler can treat it as a client error rather than an
+ // internal error, etc). However, let's first see if it ever becomes a
+ // problem.
+ //
+ optional<tenant_service> service;
+
+ if (ops.service_id_specified ())
+ service = tenant_service (ops.service_id (),
+ ops.service_type (),
+ (ops.service_data_specified ()
+ ? ops.service_data ()
+ : optional<string> ()));
+
+ db.persist (tenant (tnt,
+ ops.private_ (),
+ (ops.interactive_specified ()
+ ? ops.interactive ()
+ : optional<string> ()),
+ move (service)));
// On the first pass over the internal repositories we load their
// certificate information and packages.
@@ -1356,7 +1835,13 @@ try
ir.buildable,
priority++));
- load_packages (r, db, ops.ignore_unknown (), overrides);
+ load_packages (ops,
+ r,
+ r->cache_location,
+ db,
+ ops.ignore_unknown (),
+ overrides,
+ ops.overrides_file ().string ());
}
// On the second pass over the internal repositories we load their
@@ -1369,12 +1854,17 @@ try
db.load<repository> (
repository_id (tnt, ir.location.canonical_name ())));
- load_repositories (r, db, ops.ignore_unknown (), ops.shallow ());
+ load_repositories (ops,
+ r,
+ r->cache_location,
+ db,
+ ops.ignore_unknown (),
+ ops.shallow ());
}
- // Resolve internal packages dependencies unless this is a shallow load.
+ // Resolve internal packages dependencies and, unless this is a shallow
+ // load, make sure there are no package dependency cycles.
//
- if (!ops.shallow ())
{
session s;
using query = query<package>;
@@ -1383,16 +1873,20 @@ try
db.query<package> (
query::id.tenant == tnt &&
query::internal_repository.canonical_name.is_not_null ()))
- resolve_dependencies (p, db);
+ resolve_dependencies (p,
+ db,
+ ops.shallow (),
+ ops.ignore_unresolved_tests ());
- // Make sure there is no package dependency cycles.
- //
- package_ids chain;
- for (const auto& p:
- db.query<package> (
- query::id.tenant == tnt &&
- query::internal_repository.canonical_name.is_not_null ()))
- detect_dependency_cycle (p.id, chain, db);
+ if (!ops.shallow ())
+ {
+ package_ids chain;
+ for (const auto& p:
+ db.query<package> (
+ query::id.tenant == tnt &&
+ query::internal_repository.canonical_name.is_not_null ()))
+ detect_dependency_cycle (p.id, chain, db);
+ }
}
}
diff --git a/load/types-parsers.cxx b/load/types-parsers.cxx
index bc829f3..4c4ea9d 100644
--- a/load/types-parsers.cxx
+++ b/load/types-parsers.cxx
@@ -1,5 +1,4 @@
// file : load/types-parsers.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <load/types-parsers.hxx>
diff --git a/load/types-parsers.hxx b/load/types-parsers.hxx
index de7f001..1d2a6c9 100644
--- a/load/types-parsers.hxx
+++ b/load/types-parsers.hxx
@@ -1,5 +1,4 @@
// file : load/types-parsers.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
// CLI parsers, included into the generated source files.
diff --git a/manifest b/manifest
index 36f7fb4..63c866e 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: brep
-version: 0.13.0-a.0.z
+version: 0.17.0-a.0.z
project: build2
summary: build2 package repository web interface
license: MIT
@@ -12,24 +12,41 @@ url: https://build2.org
doc-url: https://build2.org/doc.xhtml
src-url: https://git.build2.org/cgit/brep/tree/
email: users@build2.org
-build-email: builds@build2.org
-builds: linux freebsd ; Only supports Linux and FreeBSD.
-builds: -linux -freebsd ; Requires system packages.
+build-warning-email: builds@build2.org
requires: c++14
requires: postgresql >= 9.0
requires: apache2 ; Including development files (httpd.h header, etc).
-depends: * build2 >= 0.12.0
-depends: * bpkg >= 0.12.0
-# @@ Should probably become conditional dependency.
-requires: ? cli ; Only required if changing .cli files.
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
+# @@ DEP Should probably become conditional dependency.
+#requires: ? cli ; Only required if changing .cli files.
depends: libapr1
depends: libapreq2
-depends: libcmark-gfm == 0.29.0-a.1
-depends: libcmark-gfm-extensions == 0.29.0-a.1
-depends: libstudxml ^1.1.0-b.8
-depends: libodb [2.5.0-b.18.1 2.5.0-b.19)
-depends: libodb-pgsql [2.5.0-b.18.1 2.5.0-b.19)
-depends: libbutl [0.13.0-a.0.1 0.13.0-a.1)
-depends: libbpkg [0.13.0-a.0.1 0.13.0-a.1)
-depends: libbbot [0.13.0-a.0.1 0.13.0-a.1)
-depends: libbutl.bash [0.13.0-a.0.1 0.13.0-a.1)
+depends: libcmark-gfm == 0.29.0-a.4
+depends: libcmark-gfm-extensions == 0.29.0-a.4
+depends: libstudxml ^1.1.0-b.10
+depends: libodb [2.5.0-b.26.1 2.5.0-b.27)
+depends: libodb-pgsql [2.5.0-b.26.1 2.5.0-b.27)
+depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbpkg [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbbot [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbutl.bash [0.17.0-a.0.1 0.17.0-a.1)
+depends: bpkg-util [0.17.0-a.0.1 0.17.0-a.1)
+
+# This package depends on platform-specific implementation libraries that
+# are (currently) not packaged and need to come from the system package
+# manager. It also requires rsync for tests.
+#
+builds: none
+
+debian-builds: sys
+debian-build-exclude: linux_debian_12-** ; libapreq2 not available
+debian-build-include: linux_debian*-**
+debian-build-include: linux_ubuntu*-**
+debian-build-exclude: **
+debian-build-config: sys:apache2-dev ?sys:libapr1 ?sys:libapreq2 ?sys:libpq sys:rsync
+
+fedora-builds: sys
+fedora-build-include: linux_fedora*-**
+fedora-build-exclude: **
+fedora-build-config: sys:httpd-devel ?sys:libapr1 ?sys:libapreq2 ?sys:libpq sys:rsync
diff --git a/migrate/buildfile b/migrate/buildfile
index 0fd6558..0480cc6 100644
--- a/migrate/buildfile
+++ b/migrate/buildfile
@@ -1,5 +1,4 @@
# file : migrate/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
import libs = libodb%lib{odb}
@@ -12,6 +11,10 @@ exe{brep-migrate}: {hxx ixx cxx}{* -migrate-options} \
{hxx ixx cxx}{ migrate-options} \
../libbrep/lib{brep} $libs
+# Build options.
+#
+obj{migrate}: cxx.poptions += -DBREP_COPYRIGHT=\"$copyright\"
+
# Generated options parser.
#
if $cli.configured
diff --git a/migrate/migrate.cli b/migrate/migrate.cli
index 52d31cc..177f991 100644
--- a/migrate/migrate.cli
+++ b/migrate/migrate.cli
@@ -1,5 +1,4 @@
// file : migrate/migrate.cli
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
include <vector>;
@@ -126,8 +125,8 @@ Fatal error.|
\li|\cb{2}
-An instance of \cb{brep-migrate} or \l{brep-load(1)} is already running. Try
-again.|
+An instance of \cb{brep-migrate} or some other \cb{brep} utility is already
+running. Try again.|
\li|\cb{3}
diff --git a/migrate/migrate.cxx b/migrate/migrate.cxx
index c1f4dc1..090fcac 100644
--- a/migrate/migrate.cxx
+++ b/migrate/migrate.cxx
@@ -1,5 +1,4 @@
// file : migrate/migrate.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <strings.h> // strcasecmp()
@@ -13,8 +12,10 @@
#include <odb/pgsql/database.hxx>
-#include <libbutl/pager.mxx>
+#include <libbutl/pager.hxx>
+#include <libbrep/build.hxx>
+#include <libbrep/build-odb.hxx>
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
#include <libbrep/database-lock.hxx>
@@ -114,7 +115,7 @@ schema (const char* s, string name)
string kw;
i >> kw;
- statement += " " + kw;
+ statement += ' ' + kw;
if (strcasecmp (kw.c_str (), "FUNCTION") == 0)
{
@@ -132,7 +133,7 @@ schema (const char* s, string name)
else if (strcasecmp (kw.c_str (), "FOREIGN") == 0)
{
i >> kw;
- statement += " " + kw;
+ statement += ' ' + kw;
valid = strcasecmp (kw.c_str (), "TABLE") == 0;
// Fall through.
@@ -219,8 +220,28 @@ struct package_migration_entry: package_migration_entry_base<v>
: package_migration_entry_base<v> (f, "package") {}
};
-static const package_migration_entry<18>
-package_migrate_v18 ([] (database& db)
+static const package_migration_entry<26>
+package_migrate_v26 ([] (database& db)
+{
+});
+#endif
+
+// Register the data migration functions for the build database schema.
+//
+#if 0
+template <schema_version v>
+using build_migration_entry_base =
+ data_migration_entry<v, LIBBREP_BUILD_SCHEMA_VERSION_BASE>;
+
+template <schema_version v>
+struct build_migration_entry: build_migration_entry_base<v>
+{
+ build_migration_entry (void (*f) (database& db))
+ : build_migration_entry_base<v> (f, "build") {}
+};
+
+static const build_migration_entry<19>
+build_migrate_v19 ([] (database& db)
{
});
#endif
@@ -243,7 +264,7 @@ try
<< "libbbot " << LIBBBOT_VERSION_ID << endl
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
- << "Copyright (c) 2014-2019 Code Synthesis Ltd" << endl
+ << "Copyright (c) " << BREP_COPYRIGHT << "." << endl
<< "This is free software released under the MIT license." << endl;
return 0;
@@ -301,12 +322,12 @@ try
ops.db_port (),
"options='-c default_transaction_isolation=serializable'");
- // Prevent several brep-migrate/load instances from updating DB
+ // Prevent several brep utility instances from updating the database
// simultaneously.
//
database_lock l (db);
- // Currently we don't support data migration for the manual database scheme
+ // Currently we don't support data migration for the manual database schema
// migration.
//
if (db.schema_migration (db_schema))
diff --git a/mod/.gitignore b/mod/.gitignore
index c6e608b..6b64ad0 100644
--- a/mod/.gitignore
+++ b/mod/.gitignore
@@ -1 +1 @@
-options.?xx
+*-options.?xx
diff --git a/mod/build-config-module.cxx b/mod/build-config-module.cxx
index 13f61b7..97c9f9e 100644
--- a/mod/build-config-module.cxx
+++ b/mod/build-config-module.cxx
@@ -1,5 +1,4 @@
// file : mod/build-config-module.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/build-config-module.hxx>
@@ -9,37 +8,35 @@
#include <map>
#include <sstream>
-#include <libbutl/sha256.mxx>
-#include <libbutl/utility.mxx> // throw_generic_error(), alpha(), etc.
-#include <libbutl/openssl.mxx>
-#include <libbutl/filesystem.mxx> // dir_iterator, dir_entry
-#include <libbutl/path-pattern.mxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/utility.hxx> // throw_generic_error()
+#include <libbutl/openssl.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry
namespace brep
{
using namespace std;
using namespace butl;
using namespace bpkg;
- using namespace bbot;
- // Return pointer to the shared build configurations instance, creating one
- // on the first call. Throw tab_parsing on parsing error, io_error on the
- // underlying OS error. Note: not thread-safe.
+ // Return pointer to the shared build target configurations instance,
+ // creating one on the first call. Throw tab_parsing on parsing error,
+ // io_error on the underlying OS error. Note: not thread-safe.
//
- static shared_ptr<const build_configs>
+ static shared_ptr<const build_target_configs>
shared_build_config (const path& p)
{
- static map<path, weak_ptr<build_configs>> configs;
+ static map<path, weak_ptr<build_target_configs>> configs;
auto i (configs.find (p));
if (i != configs.end ())
{
- if (shared_ptr<build_configs> c = i->second.lock ())
+ if (shared_ptr<build_target_configs> c = i->second.lock ())
return c;
}
- shared_ptr<build_configs> c (
- make_shared<build_configs> (parse_buildtab (p)));
+ shared_ptr<build_target_configs> c (
+ make_shared<build_target_configs> (bbot::parse_buildtab (p)));
configs[p] = c;
return c;
@@ -74,7 +71,7 @@ namespace brep
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
if (de.path ().extension () == "pem" &&
de.type () == entry_type::regular)
@@ -111,7 +108,7 @@ namespace brep
catch (const system_error& e)
{
ostringstream os;
- os<< "unable to iterate over agents keys directory '" << d << "'";
+ os << "unable to iterate over agents keys directory '" << d << "'";
throw_generic_error (e.code ().value (), os.str ().c_str ());
}
@@ -124,7 +121,7 @@ namespace brep
{
try
{
- build_conf_ = shared_build_config (bo.build_config ());
+ target_conf_ = shared_build_config (bo.build_config ());
}
catch (const io_error& e)
{
@@ -139,207 +136,21 @@ namespace brep
bot_agent_key_map_ =
shared_bot_agent_keys (bo, bo.build_bot_agent_keys ());
- cstrings conf_names;
-
- using conf_map_type = map<const char*,
- const build_config*,
- compare_c_string>;
+ using conf_map_type = map<build_target_config_id,
+ const build_target_config*>;
conf_map_type conf_map;
- for (const auto& c: *build_conf_)
- {
- const char* cn (c.name.c_str ());
- conf_map[cn] = &c;
- conf_names.push_back (cn);
- }
-
- build_conf_names_ = make_shared<cstrings> (move (conf_names));
- build_conf_map_ = make_shared<conf_map_type> (move (conf_map));
- }
-
- // The default underlying class set expression (see below).
- //
- static const build_class_expr default_ucs_expr (
- {"default"}, '+', "Default.");
-
- bool build_config_module::
- exclude (const small_vector<build_class_expr, 1>& exprs,
- const vector<build_constraint>& constrs,
- const build_config& cfg,
- string* reason) const
- {
- // Save the first sentence of the reason, lower-case the first letter if
- // the beginning looks like a word (all subsequent characters until a
- // whitespace are lower-case letters).
- //
- auto sanitize = [] (const string& reason)
- {
- string r (reason.substr (0, reason.find ('.')));
-
- char c (r[0]); // Can be '\0'.
- if (alpha (c) && c == ucase (c))
- {
- bool word (true);
-
- for (size_t i (1);
- i != r.size () && (c = r[i]) != ' ' && c != '\t' && c != '\n';
- ++i)
- {
- // Is not a word if contains a non-letter or an upper-case letter.
- //
- if (!alpha (c) || c == ucase (c))
- {
- word = false;
- break;
- }
- }
-
- if (word)
- r[0] = lcase (r[0]);
- }
-
- return r;
- };
-
- // First, match the configuration against the package underlying build
- // class set and expressions.
- //
- bool m (false);
-
- // Match the configuration against an expression, updating the match
- // result.
- //
- // We will use a comment of the first encountered excluding expression
- // (changing the result from true to false) or non-including one (leaving
- // the false result) as an exclusion reason.
- //
- auto match = [&cfg, &m, reason, &sanitize, this]
- (const build_class_expr& e)
- {
- bool pm (m);
- e.match (cfg.classes, build_conf_->class_inheritance_map, m);
-
- if (reason != nullptr)
- {
- // Reset the reason which, if saved, makes no sense anymore.
- //
- if (m)
- {
- reason->clear ();
- }
- else if (reason->empty () &&
- //
- // Exclusion.
- //
- (pm ||
- //
- // Non-inclusion. Make sure that the build class expression
- // is empty or starts with an addition (+...).
- //
- e.expr.empty () ||
- e.expr.front ().operation == '+'))
- {
- *reason = sanitize (e.comment);
- }
- }
- };
-
- // Determine the underlying class set. Note that in the future we can
- // potentially extend the underlying set with special classes.
- //
- const build_class_expr* ucs (
- !exprs.empty () && !exprs.front ().underlying_classes.empty ()
- ? &exprs.front ()
- : nullptr);
-
- // Note that the combined package build configuration class expression can
- // be represented as the underlying class set used as a starting set for
- // the original expressions and a restricting set, simultaneously. For
- // example, for the expression:
- //
- // default legacy : -msvc
- //
- // the resulting expression will be:
- //
- // +( +default +legacy ) -msvc &( +default +legacy )
- //
- // Let's, however, optimize it a bit based on the following facts:
- //
- // - If the underlying class set expression (+default +legacy in the above
- // example) evaluates to false, then the resulting expression also
- // evaluates to false due to the trailing '&' operation. Thus, we don't
- // need to evaluate further if that's the case.
- //
- // - On the other hand, if the underlying class set expression evaluates
- // to true, then we don't need to apply the trailing '&' operation as it
- // cannot affect the result.
- //
- const build_class_expr& ucs_expr (
- ucs != nullptr
- ? build_class_expr (ucs->underlying_classes, '+', ucs->comment)
- : default_ucs_expr);
-
- match (ucs_expr);
-
- if (m)
- {
- for (const build_class_expr& e: exprs)
- match (e);
- }
+ for (const auto& c: *target_conf_)
+ conf_map[build_target_config_id {c.target, c.name}] = &c;
- // Exclude the configuration if it doesn't match the compound expression.
- //
- if (!m)
- return true;
-
- // Now check if the configuration is excluded/included via the patterns.
- //
- // To implement matching of absent name components with wildcard-only
- // pattern components we are going to convert names to paths (see
- // dash_components_to_path() for details).
- //
- // And if any of the build-{include,exclude} values (which is legal) or
- // the build configuration name/target (illegal) are invalid paths, then
- // we assume no match.
- //
- if (!constrs.empty ())
- try
- {
- path cn (dash_components_to_path (cfg.name));
- path tg (dash_components_to_path (cfg.target.string ()));
-
- for (const build_constraint& c: constrs)
- {
- if (path_match (cn,
- dash_components_to_path (c.config),
- dir_path () /* start */,
- path_match_flags::match_absent) &&
- (!c.target ||
- path_match (tg,
- dash_components_to_path (*c.target),
- dir_path () /* start */,
- path_match_flags::match_absent)))
- {
- if (!c.exclusion)
- return false;
-
- if (reason != nullptr)
- *reason = sanitize (c.comment);
-
- return true;
- }
- }
- }
- catch (const invalid_path&) {}
-
- return false;
+ target_conf_map_ = make_shared<conf_map_type> (move (conf_map));
}
bool build_config_module::
- belongs (const bbot::build_config& cfg, const char* cls) const
+ belongs (const build_target_config& cfg, const char* cls) const
{
- const map<string, string>& im (build_conf_->class_inheritance_map);
+ const map<string, string>& im (target_conf_->class_inheritance_map);
for (const string& c: cfg.classes)
{
@@ -361,59 +172,4 @@ namespace brep
return false;
}
-
- path build_config_module::
- dash_components_to_path (const string& pattern)
- {
- string r;
- size_t nstar (0);
- for (const path_pattern_term& pt: path_pattern_iterator (pattern))
- {
- switch (pt.type)
- {
- case path_pattern_term_type::star:
- {
- // Replace ** with */**/* and skip all the remaining stars that may
- // follow in this sequence.
- //
- if (nstar == 0)
- r += "*";
- else if (nstar == 1)
- r += "/**/*"; // The first star is already copied.
-
- break;
- }
- case path_pattern_term_type::literal:
- {
- // Replace '-' with '/' and fall through otherwise.
- //
- if (get_literal (pt) == '-')
- {
- r += '/';
- break;
- }
- }
- // Fall through.
- default:
- {
- r.append (pt.begin, pt.end); // Copy the pattern term as is.
- }
- }
-
- nstar = pt.star () ? nstar + 1 : 0;
- }
-
- // Append the trailing slash to match the resulting paths as directories.
- // This is required for the trailing /* we could append to match absent
- // directory path components (see path_match_flags::match_absent for
- // details).
- //
- // Note that valid dash components may not contain a trailing dash.
- // Anyway, any extra trailing slashes will be ignored by the path
- // constructor.
- //
- r += '/';
-
- return path (move (r));
- }
}
diff --git a/mod/build-config-module.hxx b/mod/build-config-module.hxx
index 25ddbb4..c1630b0 100644
--- a/mod/build-config-module.hxx
+++ b/mod/build-config-module.hxx
@@ -1,24 +1,20 @@
// file : mod/build-config-module.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_BUILD_CONFIG_MODULE_HXX
#define MOD_BUILD_CONFIG_MODULE_HXX
#include <map>
-#include <algorithm> // find()
-#include <libbutl/utility.mxx> // compare_c_string
+#include <libbutl/target-triplet.hxx>
#include <libbpkg/manifest.hxx>
-#include <libbbot/build-config.hxx>
-
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>
// Base class for modules that utilize the build controller configuration.
//
@@ -40,55 +36,51 @@ namespace brep
void
init (const options::build&);
- // Return true if the specified build configuration is excluded by a
- // package based on its underlying build class set, build class
- // expressions, and build constraints, potentially extending the
- // underlying set with the special classes. Set the exclusion reason if
- // requested.
- //
+ template <typename K>
bool
- exclude (const small_vector<bpkg::build_class_expr, 1>&,
- const vector<bpkg::build_constraint>&,
- const bbot::build_config&,
- string* reason = nullptr) const;
+ exclude (const build_package_config_template<K>& pc,
+ const build_class_exprs& common_builds,
+ const build_constraints& common_constraints,
+ const build_target_config& tc,
+ string* reason = nullptr,
+ bool default_all_ucs = false) const
+ {
+ return brep::exclude (pc,
+ common_builds,
+ common_constraints,
+ tc,
+ target_conf_->class_inheritance_map,
+ reason,
+ default_all_ucs);
+ }
// Check if the configuration belongs to the specified class.
//
bool
- belongs (const bbot::build_config&, const char*) const;
+ belongs (const build_target_config&, const char*) const;
bool
- belongs (const bbot::build_config& cfg, const string& cls) const
+ belongs (const build_target_config& cfg, const string& cls) const
{
return belongs (cfg, cls.c_str ());
}
- // Convert dash-separated components (target, build configuration name,
- // machine name) or a pattern thereof into a path, replacing dashes with
- // slashes (directory separators), `**` with `*/**/*`, and appending the
- // trailing slash for a subsequent match using the path_match()
- // functionality (the idea here is for `linux**` to match `linux-gcc`
- // which is quite natural to expect). Throw invalid_path if the resulting
- // path is invalid.
- //
- // Note that the match_absent path match flag must be used for the above
- // `**` transformation to work.
+ // Target/configuration/toolchain combination that, in particular, can be
+ // used as a set value.
//
- static path
- dash_components_to_path (const string&);
-
- // Configuration/toolchain combination that, in particular, can be used as
- // a set value.
- //
- // Note: contains shallow references to the configuration, toolchain name,
- // and version.
+ // Note: all members are shallow references.
//
struct config_toolchain
{
- const string& configuration;
+ const butl::target_triplet& target;
+ const string& target_config;
+ const string& package_config;
const string& toolchain_name;
const bpkg::version& toolchain_version;
+ // Note: the comparison reflects the order of unbuilt configurations on
+ // the Builds page.
+ //
bool
operator< (const config_toolchain& ct) const
{
@@ -98,19 +90,24 @@ namespace brep
if (toolchain_version != ct.toolchain_version)
return toolchain_version > ct.toolchain_version;
- return configuration.compare (ct.configuration) < 0;
+ if (int r = target.compare (ct.target))
+ return r < 0;
+
+ if (int r = target_config.compare (ct.target_config))
+ return r < 0;
+
+ return package_config.compare (ct.package_config) < 0;
}
};
protected:
// Build configurations.
//
- shared_ptr<const bbot::build_configs> build_conf_;
- shared_ptr<const cstrings> build_conf_names_;
+ shared_ptr<const build_target_configs> target_conf_;
- shared_ptr<const std::map<const char*,
- const bbot::build_config*,
- butl::compare_c_string>> build_conf_map_;
+ shared_ptr<const std::map<build_target_config_id,
+ const build_target_config*>>
+ target_conf_map_;
// Map of build bot agent public keys fingerprints to the key file paths.
//
diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx
new file mode 100644
index 0000000..9ac1390
--- /dev/null
+++ b/mod/build-result-module.cxx
@@ -0,0 +1,349 @@
+// file : mod/build-result-module.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/build-result-module.hxx>
+
+#include <odb/database.hxx>
+
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/process-io.hxx>
+#include <libbutl/semantic-version.hxx>
+
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+
+namespace brep
+{
+ using namespace std;
+ using namespace butl;
+
+ // While currently the user-defined copy constructor is not required (we
+ // don't need to deep copy nullptr's), it is a good idea to keep the
+ // placeholder ready for less trivial cases.
+ //
+ build_result_module::
+ build_result_module (const build_result_module& r)
+ : database_module (r),
+ build_config_module (r),
+ use_openssl_pkeyutl_ (r.initialized_ ? r.use_openssl_pkeyutl_ : false)
+ {
+ }
+
+ void build_result_module::
+ init (const options::build& bo, const options::build_db& bdo)
+ {
+ HANDLER_DIAG;
+
+ build_config_module::init (bo);
+ database_module::init (bdo, bdo.build_db_retry ());
+
+ try
+ {
+ optional<openssl_info> oi (
+ openssl::info ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ 2,
+ bo.openssl ()));
+
+ use_openssl_pkeyutl_ = oi &&
+ oi->name == "OpenSSL" &&
+ oi->version >= semantic_version {3, 0, 0};
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain openssl version: " << e;
+ }
+ }
+
+ build_result_module::parse_session_result build_result_module::
+ parse_session (const string& s) const
+ {
+ using brep::version; // Not to be confused with module::version.
+
+ parse_session_result r;
+
+ size_t p (s.find ('/')); // End of tenant.
+
+ if (p == string::npos)
+ throw invalid_argument ("no package name");
+
+ if (tenant.compare (0, tenant.size (), s, 0, p) != 0)
+ throw invalid_argument ("tenant mismatch");
+
+ size_t b (p + 1); // Start of package name.
+ p = s.find ('/', b); // End of package name.
+
+ if (p == b)
+ throw invalid_argument ("empty package name");
+
+ if (p == string::npos)
+ throw invalid_argument ("no package version");
+
+ package_name name;
+
+ try
+ {
+ name = package_name (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (
+ string ("invalid package name : ") + e.what ());
+ }
+
+ b = p + 1; // Start of version.
+ p = s.find ('/', b); // End of version.
+
+ if (p == string::npos)
+ throw invalid_argument ("no target");
+
+ auto parse_version = [&s, &b, &p] (const char* what) -> version
+ {
+ // Intercept exception handling to add the parsing error attribution.
+ //
+ try
+ {
+ return brep::version (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (
+ string ("invalid ") + what + ": " + e.what ());
+ }
+ };
+
+ r.package_version = parse_version ("package version");
+
+ b = p + 1; // Start of target.
+ p = s.find ('/', b); // End of target.
+
+ if (p == string::npos)
+ throw invalid_argument ("no target configuration name");
+
+ target_triplet target;
+ try
+ {
+ target = target_triplet (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ b = p + 1; // Start of target configuration name.
+ p = s.find ('/', b); // End of target configuration name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no package configuration name");
+
+ string target_config (s, b, p - b);
+
+ if (target_config.empty ())
+ throw invalid_argument ("empty target configuration name");
+
+ b = p + 1; // Start of package configuration name.
+ p = s.find ('/', b); // End of package configuration name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no toolchain name");
+
+ string package_config (s, b, p - b);
+
+ if (package_config.empty ())
+ throw invalid_argument ("empty package configuration name");
+
+ b = p + 1; // Start of toolchain name.
+ p = s.find ('/', b); // End of toolchain name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no toolchain version");
+
+ string toolchain_name (s, b, p - b);
+
+ if (toolchain_name.empty ())
+ throw invalid_argument ("empty toolchain name");
+
+ b = p + 1; // Start of toolchain version.
+ p = s.find ('/', b); // End of toolchain version.
+
+ if (p == string::npos)
+ throw invalid_argument ("no timestamp");
+
+ r.toolchain_version = parse_version ("toolchain version");
+
+ r.id = build_id (package_id (move (tenant), move (name), r.package_version),
+ move (target),
+ move (target_config),
+ move (package_config),
+ move (toolchain_name),
+ r.toolchain_version);
+
+ try
+ {
+ size_t tsn;
+ string ts (s, p + 1);
+
+ r.timestamp = timestamp (chrono::duration_cast<timestamp::duration> (
+ chrono::nanoseconds (stoull (ts, &tsn))));
+
+ if (tsn != ts.size ())
+ throw invalid_argument ("trailing junk");
+ }
+ // Handle invalid_argument or out_of_range (both derive from logic_error),
+ // that can be thrown by stoull().
+ //
+ catch (const logic_error& e)
+ {
+ throw invalid_argument (string ("invalid timestamp: ") + e.what ());
+ }
+
+ return r;
+ }
+
+ bool build_result_module::
+ authenticate_session (const options::build& o,
+ const optional<vector<char>>& challenge,
+ const build& b,
+ const string& session) const
+ {
+ HANDLER_DIAG;
+
+ auto warn_auth = [&session, &warn] (const string& d)
+ {
+ warn << "session '" << session << "' authentication failed: " << d;
+ };
+
+ bool r (false);
+
+ // Must both be present or absent.
+ //
+ if (!b.agent_challenge != !challenge)
+ {
+ warn_auth (challenge ? "unexpected challenge": "challenge is expected");
+ }
+ else if (bot_agent_key_map_ == nullptr) // Authentication is disabled.
+ {
+ r = true;
+ }
+ else if (!b.agent_challenge) // Authentication is recently enabled.
+ {
+ warn_auth ("challenge is required now");
+ }
+ else
+ {
+ assert (b.agent_fingerprint && challenge);
+
+ auto auth = [&challenge,
+ &b,
+ &o,
+ &fail, &trace,
+ &warn_auth,
+ this] (const path& key)
+ {
+ bool r (false);
+
+ try
+ {
+ openssl os ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ path ("-"), fdstream_mode::text, 2,
+ process_env (o.openssl (), o.openssl_envvar ()),
+ use_openssl_pkeyutl_ ? "pkeyutl" : "rsautl",
+ o.openssl_option (),
+ use_openssl_pkeyutl_ ? "-verifyrecover" : "-verify",
+ "-pubin",
+ "-inkey", key);
+
+ for (const auto& c: *challenge)
+ os.out.put (c); // Sets badbit on failure.
+
+ os.out.close ();
+
+ string s;
+ getline (os.in, s);
+
+ bool v (os.in.eof ());
+ os.in.close ();
+
+ if (os.wait () && v)
+ {
+ r = (s == *b.agent_challenge);
+
+ if (!r)
+ warn_auth ("challenge mismatched");
+ }
+ else // The signature is presumably meaningless.
+ warn_auth ("unable to verify challenge");
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to verify challenge: " << e;
+ }
+
+ return r;
+ };
+
+ const string& fp (*b.agent_fingerprint);
+ auto i (bot_agent_key_map_->find (fp));
+
+ // Note that it is possible that the default vs custom bot
+ // classification has changed since the task request time. It feels that
+ // there is nothing wrong with that and we will handle that
+ // automatically.
+ //
+ if (i != bot_agent_key_map_->end ()) // Default bot?
+ {
+ r = auth (i->second);
+ }
+ else // Custom bot.
+ {
+ shared_ptr<build_public_key> k (
+ build_db_->find<build_public_key> (public_key_id (b.tenant, fp)));
+
+ if (k != nullptr)
+ {
+ // Temporarily save the key data to disk (note that it's the
+ // challenge which is passed via stdin to openssl). Hopefully /tmp
+ // is using tmpfs.
+ //
+ auto_rmfile arm;
+
+ try
+ {
+ arm = auto_rmfile (path::temp_path ("brep-custom-bot-key"));
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain temporary file: " << e;
+ }
+
+ try
+ {
+ ofdstream os (arm.path);
+ os << *k;
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to '" << arm.path << "': " << e;
+ }
+
+ r = auth (arm.path);
+ }
+ else
+ {
+ // The agent's key has recently been replaced.
+ //
+ warn_auth ("agent's public key not found");
+ }
+ }
+ }
+
+ return r;
+ }
+}
diff --git a/mod/build-result-module.hxx b/mod/build-result-module.hxx
new file mode 100644
index 0000000..34466e4
--- /dev/null
+++ b/mod/build-result-module.hxx
@@ -0,0 +1,78 @@
+// file : mod/build-result-module.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_BUILD_RESULT_MODULE_HXX
+#define MOD_BUILD_RESULT_MODULE_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/build.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/database-module.hxx>
+#include <mod/build-config-module.hxx>
+
+namespace brep
+{
+ // Base class for modules that handle the build task results.
+ //
+ // Specifically, it loads build controller configuration, initializes the
+ // build database instance, and provides utilities for parsing and
+ // authenticating the build task session.
+ //
+ class build_result_module: public database_module,
+ protected build_config_module
+ {
+ protected:
+ build_result_module () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ build_result_module (const build_result_module&);
+
+ void
+ init (const options::build&, const options::build_db&);
+
+ using handler::init; // Unhide.
+
+ // Parse the build task session and verify that the session matches the
+ // tenant. Throw invalid_argument on errors.
+ //
+ struct parse_session_result
+ {
+ build_id id;
+ brep::version package_version;
+ brep::version toolchain_version;
+ brep::timestamp timestamp;
+ };
+
+ parse_session_result
+ parse_session (const string&) const;
+
+ // Return true if bbot agent authentication is disabled or the agent is
+ // recognized and challenge matches. If the session authentication fails
+ // (challenge is not expected, expected but doesn't match, etc), then log
+ // the failure reason with the warning severity and return false.
+ //
+ // Note that the session argument is used only for logging.
+ //
+ bool
+ authenticate_session (const options::build&,
+ const optional<vector<char>>& challenge,
+ const build&,
+ const string& session) const;
+
+ protected:
+ // True if the openssl version is greater or equal to 3.0.0 and so pkeyutl
+ // needs to be used instead of rsautl.
+ //
+ // Note that openssl 3.0.0 deprecates rsautl in favor of pkeyutl.
+ //
+ bool use_openssl_pkeyutl_;
+ };
+}
+
+#endif // MOD_BUILD_RESULT_MODULE_HXX
diff --git a/mod/build-target-config.cxx b/mod/build-target-config.cxx
new file mode 100644
index 0000000..a30e281
--- /dev/null
+++ b/mod/build-target-config.cxx
@@ -0,0 +1,254 @@
+// file : mod/build-target-config.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/build-target-config.hxx>
+
+#include <libbutl/utility.hxx> // alpha(), etc.
+#include <libbutl/path-pattern.hxx>
+
+namespace brep
+{
+ using namespace std;
+ using namespace butl;
+ using namespace bpkg;
+
+ // The default underlying class set expressions (see below).
+ //
+ static const build_class_expr default_ucs_expr (
+ {"default"}, '+', "Default.");
+
+ static const build_class_expr all_ucs_expr (
+ {"all"}, '+', "All.");
+
+ bool
+ exclude (const build_class_exprs& exprs,
+ const build_constraints& constrs,
+ const build_target_config& tc,
+ const map<string, string>& class_inheritance_map,
+ string* reason,
+ bool default_all_ucs)
+ {
+ // Save the first sentence of the reason, lower-case the first letter if
+ // the beginning looks like a word (all subsequent characters until a
+ // whitespace are lower-case letters).
+ //
+ auto sanitize = [] (const string& reason)
+ {
+ string r (reason.substr (0, reason.find ('.')));
+
+ char c (r[0]); // Can be '\0'.
+ if (alpha (c) && c == ucase (c))
+ {
+ bool word (true);
+
+ for (size_t i (1);
+ i != r.size () && (c = r[i]) != ' ' && c != '\t' && c != '\n';
+ ++i)
+ {
+ // Is not a word if contains a non-letter or an upper-case letter.
+ //
+ if (!alpha (c) || c == ucase (c))
+ {
+ word = false;
+ break;
+ }
+ }
+
+ if (word)
+ r[0] = lcase (r[0]);
+ }
+
+ return r;
+ };
+
+ // First, match the configuration against the package underlying build
+ // class set and expressions.
+ //
+ bool m (false);
+
+ // Match the configuration against an expression, updating the match
+ // result.
+ //
+ // We will use a comment of the first encountered excluding expression
+ // (changing the result from true to false) or non-including one (leaving
+ // the false result) as an exclusion reason.
+ //
+ auto match = [&tc, &m, reason, &sanitize, &class_inheritance_map]
+ (const build_class_expr& e)
+ {
+ bool pm (m);
+ e.match (tc.classes, class_inheritance_map, m);
+
+ if (reason != nullptr)
+ {
+ // Reset the reason which, if saved, makes no sense anymore.
+ //
+ if (m)
+ {
+ reason->clear ();
+ }
+ else if (reason->empty () &&
+ //
+ // Exclusion.
+ //
+ (pm ||
+ //
+ // Non-inclusion. Make sure that the build class expression
+ // is empty or starts with an addition (+...).
+ //
+ e.expr.empty () ||
+ e.expr.front ().operation == '+'))
+ {
+ *reason = sanitize (e.comment);
+ }
+ }
+ };
+
+ // Determine the underlying class set. Note that in the future we can
+ // potentially extend the underlying set with special classes.
+ //
+ const build_class_expr* ucs (
+ !exprs.empty () && !exprs.front ().underlying_classes.empty ()
+ ? &exprs.front ()
+ : nullptr);
+
+ // Note that the combined package build configuration class expression can
+ // be represented as the underlying class set used as a starting set for
+ // the original expressions and a restricting set, simultaneously. For
+ // example, for the expression:
+ //
+ // default legacy : -msvc
+ //
+ // the resulting expression will be:
+ //
+ // +( +default +legacy ) -msvc &( +default +legacy )
+ //
+ // Let's, however, optimize it a bit based on the following facts:
+ //
+ // - If the underlying class set expression (+default +legacy in the above
+ // example) evaluates to false, then the resulting expression also
+ // evaluates to false due to the trailing '&' operation. Thus, we don't
+ // need to evaluate further if that's the case.
+ //
+ // - On the other hand, if the underlying class set expression evaluates
+ // to true, then we don't need to apply the trailing '&' operation as it
+ // cannot affect the result.
+ //
+ const build_class_expr& ucs_expr (
+ ucs != nullptr ? build_class_expr (ucs->underlying_classes,
+ '+',
+ ucs->comment) :
+ default_all_ucs ? all_ucs_expr :
+ default_ucs_expr);
+
+ match (ucs_expr);
+
+ if (m)
+ {
+ for (const build_class_expr& e: exprs)
+ match (e);
+ }
+
+ // Exclude the configuration if it doesn't match the compound expression.
+ //
+ if (!m)
+ return true;
+
+ // Now check if the configuration is excluded/included via the patterns.
+ //
+ // To implement matching of absent name components with wildcard-only
+ // pattern components we are going to convert names to paths (see
+ // dash_components_to_path() for details).
+ //
+ // And if any of the build-{include,exclude} values (which is legal) or
+ // the build configuration name/target (illegal) are invalid paths, then
+ // we assume no match.
+ //
+ if (!constrs.empty ())
+ try
+ {
+ path cn (dash_components_to_path (tc.name));
+ path tg (dash_components_to_path (tc.target.string ()));
+
+ for (const build_constraint& c: constrs)
+ {
+ if (path_match (cn,
+ dash_components_to_path (c.config),
+ dir_path () /* start */,
+ path_match_flags::match_absent) &&
+ (!c.target ||
+ path_match (tg,
+ dash_components_to_path (*c.target),
+ dir_path () /* start */,
+ path_match_flags::match_absent)))
+ {
+ if (!c.exclusion)
+ return false;
+
+ if (reason != nullptr)
+ *reason = sanitize (c.comment);
+
+ return true;
+ }
+ }
+ }
+ catch (const invalid_path&) {}
+
+ return false;
+ }
+
+ path
+ dash_components_to_path (const string& pattern)
+ {
+ string r;
+ size_t nstar (0);
+ for (const path_pattern_term& pt: path_pattern_iterator (pattern))
+ {
+ switch (pt.type)
+ {
+ case path_pattern_term_type::star:
+ {
+ // Replace ** with */**/* and skip all the remaining stars that may
+ // follow in this sequence.
+ //
+ if (nstar == 0)
+ r += "*";
+ else if (nstar == 1)
+ r += "/**/*"; // The first star is already copied.
+
+ break;
+ }
+ case path_pattern_term_type::literal:
+ {
+ // Replace '-' with '/' and fall through otherwise.
+ //
+ if (get_literal (pt) == '-')
+ {
+ r += '/';
+ break;
+ }
+ }
+ // Fall through.
+ default:
+ {
+ r.append (pt.begin, pt.end); // Copy the pattern term as is.
+ }
+ }
+
+ nstar = pt.star () ? nstar + 1 : 0;
+ }
+
+ // Append the trailing slash to match the resulting paths as directories.
+ // This is required for the trailing /* we could append to match absent
+ // directory path components (see path_match_flags::match_absent for
+ // details).
+ //
+ // Note that valid dash components may not contain a trailing dash.
+ // Anyway, any extra trailing slashes will be ignored by the path
+ // constructor.
+ //
+ r += '/';
+
+ return path (move (r));
+ }
+}
diff --git a/mod/build-target-config.hxx b/mod/build-target-config.hxx
new file mode 100644
index 0000000..60d159c
--- /dev/null
+++ b/mod/build-target-config.hxx
@@ -0,0 +1,96 @@
+// file : mod/build-target-config.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_BUILD_TARGET_CONFIG_HXX
+#define MOD_BUILD_TARGET_CONFIG_HXX
+
+#include <map>
+
+#include <libbutl/target-triplet.hxx>
+
+#include <libbpkg/manifest.hxx>
+
+#include <libbbot/build-target-config.hxx>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/common.hxx>
+
+namespace brep
+{
+ using build_target_config = bbot::build_target_config;
+ using build_target_configs = bbot::build_target_configs;
+
+ // Return true if the specified build target configuration is excluded by a
+ // package configuration based on its underlying build class set, build
+ // class expressions, and build constraints, potentially extending the
+ // underlying set with the special classes. Set the exclusion reason if
+ // requested. Optionally use the `all` class as a default underlying build
+ // class set rather than the `default` class (which is, for example, the
+ // case for the external test packages not to reduce their build target
+ // configuration set needlessly).
+ //
+ bool
+ exclude (const build_class_exprs& builds,
+ const build_constraints& constraints,
+ const build_target_config&,
+ const std::map<string, string>& class_inheritance_map,
+ string* reason = nullptr,
+ bool default_all_ucs = false);
+
+ template <typename K>
+ inline bool
+ exclude (const build_package_config_template<K>& pc,
+ const build_class_exprs& common_builds,
+ const build_constraints& common_constraints,
+ const build_target_config& tc,
+ const std::map<string, string>& class_inheritance_map,
+ string* reason = nullptr,
+ bool default_all_ucs = false)
+ {
+ return exclude (pc.effective_builds (common_builds),
+ pc.effective_constraints (common_constraints),
+ tc,
+ class_inheritance_map,
+ reason,
+ default_all_ucs);
+ }
+
+ // Convert dash-separated components (target, build target configuration
+ // name, machine name) or a pattern thereof into a path, replacing dashes
+ // with slashes (directory separators), `**` with `*/**/*`, and appending
+ // the trailing slash for a subsequent match using the path_match()
+ // functionality (the idea here is for `linux**` to match `linux-gcc` which
+ // is quite natural to expect). Throw invalid_path if the resulting path is
+ // invalid.
+ //
+ // Note that the match_absent path match flag must be used for the above
+ // `**` transformation to work.
+ //
+ path
+ dash_components_to_path (const string&);
+
+ // Build target/target configuration name combination that, in particular,
+ // identifies configurations in the buildtab and thus can be used as a
+ // set/map key.
+ //
+ // Note: contains shallow references to the target and configuration name.
+ //
+ struct build_target_config_id
+ {
+ reference_wrapper<const butl::target_triplet> target;
+ reference_wrapper<const string> config;
+
+ bool
+ operator< (const build_target_config_id& x) const
+ {
+ if (int r = target.get ().compare (x.target.get ()))
+ return r < 0;
+
+ return config.get ().compare (x.config.get ()) < 0;
+ }
+ };
+}
+
+#endif // MOD_BUILD_TARGET_CONFIG_HXX
diff --git a/mod/build.cxx b/mod/build.cxx
index 32f3691..5c37acb 100644
--- a/mod/build.cxx
+++ b/mod/build.cxx
@@ -1,15 +1,24 @@
// file : mod/build.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/build.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <odb/database.hxx>
+#include <odb/connection.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/sendmail.hxx>
+#include <libbutl/process-io.hxx>
+
+#include <web/server/mime-url-encoding.hxx>
+
+#include <libbrep/build-package-odb.hxx>
#include <mod/utility.hxx>
namespace brep
{
+ using namespace std;
using namespace web;
string
@@ -21,12 +30,15 @@ namespace brep
// needs to be url-encoded, and only in the query part of the URL. We embed
// the package version into the URL path part and so don't encode it.
//
- string url (host + tenant_dir (root, b.tenant).representation () +
- mime_url_encode (b.package_name.string (), false) + '/' +
- b.package_version.string () + "/log/" +
- mime_url_encode (b.configuration, false /* query */) + '/' +
- mime_url_encode (b.toolchain_name, false /* query */) + '/' +
- b.toolchain_version.string ());
+ string url (
+ host + tenant_dir (root, b.tenant).representation () +
+ mime_url_encode (b.package_name.string (), false) + '/' +
+ b.package_version.string () + "/log/" +
+ mime_url_encode (b.target.string (), false /* query */) + '/' +
+ mime_url_encode (b.target_config_name, false /* query */) + '/' +
+ mime_url_encode (b.package_config_name, false /* query */) + '/' +
+ mime_url_encode (b.toolchain_name, false /* query */) + '/' +
+ b.toolchain_version.string ());
if (op != nullptr)
{
@@ -45,12 +57,154 @@ namespace brep
// we embed the package version into the URL query part, where it is not
// encoded by design.
//
- return host + tenant_dir (root, b.tenant).string () +
+ return host + tenant_dir (root, b.tenant).string () +
"?build-force&pn=" + mime_url_encode (b.package_name.string ()) +
- "&pv=" + b.package_version.string () +
- "&cf=" + mime_url_encode (b.configuration) +
- "&tn=" + mime_url_encode (b.toolchain_name) +
- "&tv=" + b.toolchain_version.string () +
+ "&pv=" + b.package_version.string () +
+ "&tg=" + mime_url_encode (b.target.string ()) +
+ "&tc=" + mime_url_encode (b.target_config_name) +
+ "&pc=" + mime_url_encode (b.package_config_name) +
+ "&tn=" + mime_url_encode (b.toolchain_name) +
+ "&tv=" + b.toolchain_version.string () +
"&reason=";
}
+
+ void
+ send_notification_email (const options::build_email_notification& o,
+ const odb::core::connection_ptr& conn,
+ const build& b,
+ const build_package& p,
+ const build_package_config& pc,
+ const string& what,
+ const basic_mark& error,
+ const basic_mark* trace)
+ {
+ using namespace odb::core;
+ using namespace butl;
+
+ assert (b.state == build_state::built && b.status);
+
+ // Bail out if sending build notification emails is disabled for this
+ // toolchain for this package.
+ //
+ {
+ const map<string, build_email>& tes (o.build_toolchain_email ());
+ auto i (tes.find (b.id.toolchain_name));
+ build_email mode (i != tes.end () ? i->second : build_email::latest);
+
+ if (mode == build_email::none)
+ {
+ return;
+ }
+ else if (mode == build_email::latest)
+ {
+ transaction t (conn->begin ());
+ database& db (t.database ());
+
+ const auto& id (query<buildable_package>::build_package::id);
+
+ buildable_package lp (
+ db.query_value<buildable_package> (
+ (id.tenant == b.tenant && id.name == b.package_name) +
+ order_by_version_desc (id.version) +
+ "LIMIT 1"));
+
+ t.commit ();
+
+ if (lp.package->version != p.version)
+ return;
+ }
+ }
+
+ string subj (what + ' ' +
+ to_string (*b.status) + ": " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + ' ' +
+ b.target_config_name + '/' +
+ b.target.string () + ' ' +
+ b.package_config_name + ' ' +
+ b.toolchain_name + '-' + b.toolchain_version.string ());
+
+ // Send notification emails to the interested parties.
+ //
+ auto send_email = [&b, &subj, &o, &error, trace] (const string& to)
+ {
+ try
+ {
+ if (trace != nullptr)
+ *trace << "email '" << subj << "' to " << to;
+
+ // Redirect the diagnostics to webserver error log.
+ //
+ sendmail sm ([trace] (const char* args[], size_t n)
+ {
+ if (trace != nullptr)
+ *trace << process_args {args, n};
+ },
+ 2,
+ o.email (),
+ subj,
+ {to});
+
+ if (b.results.empty ())
+ {
+ sm.out << "No operation results available." << endl;
+ }
+ else
+ {
+ const string& host (o.host ());
+ const dir_path& root (o.root ());
+
+ ostream& os (sm.out);
+
+ os << "combined: " << *b.status << endl << endl
+ << " " << build_log_url (host, root, b) << endl << endl;
+
+ for (const auto& r: b.results)
+ os << r.operation << ": " << r.status << endl << endl
+ << " " << build_log_url (host, root, b, &r.operation)
+ << endl << endl;
+
+ os << "Force rebuild (enter the reason, use '+' instead of spaces):"
+ << endl << endl
+ << " " << build_force_url (host, root, b) << endl;
+ }
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+ };
+
+ // Send the build notification email if a non-empty package build email is
+ // specified.
+ //
+ if (const optional<email>& e = pc.effective_email (p.build_email))
+ {
+ if (!e->empty ())
+ send_email (*e);
+ }
+
+ // Send the build warning/error notification emails, if requested.
+ //
+ if (*b.status >= result_status::warning)
+ {
+ if (const optional<email>& e =
+ pc.effective_warning_email (p.build_warning_email))
+ send_email (*e);
+ }
+
+ if (*b.status >= result_status::error)
+ {
+ if (const optional<email>& e =
+ pc.effective_error_email (p.build_error_email))
+ send_email (*e);
+ }
+ }
}
diff --git a/mod/build.hxx b/mod/build.hxx
index 7ae7f95..07e4411 100644
--- a/mod/build.hxx
+++ b/mod/build.hxx
@@ -1,14 +1,19 @@
// file : mod/build.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_BUILD_HXX
#define MOD_BUILD_HXX
+#include <odb/forward.hxx> // odb::core::connection_ptr
+
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <libbrep/build.hxx>
+#include <libbrep/build-package.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
// Various package build-related utilities.
//
@@ -26,6 +31,19 @@ namespace brep
//
string
build_force_url (const string& host, const dir_path& root, const build&);
+
+ // Send the notification email for the specified package configuration
+ // build. The build is expected to be in the built state.
+ //
+ void
+ send_notification_email (const options::build_email_notification&,
+ const odb::core::connection_ptr&,
+ const build&,
+ const build_package&,
+ const build_package_config&,
+ const string& what, // build, rebuild, etc.
+ const basic_mark& error,
+ const basic_mark* trace);
}
#endif // MOD_BUILD_HXX
diff --git a/mod/buildfile b/mod/buildfile
index ffa9031..c3895dc 100644
--- a/mod/buildfile
+++ b/mod/buildfile
@@ -1,5 +1,4 @@
# file : mod/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
define mod: libs
@@ -20,25 +19,43 @@ import libs += libbpkg%lib{bpkg}
import libs += libbbot%lib{bbot}
include ../libbrep/
-include ../web/
-mod{brep}: {hxx ixx txx cxx}{* -options} \
- {hxx ixx cxx}{ options} \
- ../libbrep/lib{brep} ../web/libus{web} $libs
+include ../web/xhtml/
+include ../web/server/
+
+./: mod{brep} {libue libus}{mod}
+
+libu_src = options-types types-parsers build-target-config
+
+mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \
+ libus{mod} ../libbrep/lib{brep} ../web/server/libus{web-server} \
+ $libs
+
+{libue libus}{mod}: {hxx ixx cxx}{module-options} \
+ {hxx ixx txx cxx}{+{$libu_src} } \
+ $libs
+
+# Add support for tenant-associated service notifications to the CI module for
+# the debugging of the notifications machinery.
+#
+cxx.poptions += -DBREP_CI_TENANT_SERVICE
+
+libus{mod}: ../web/xhtml/libus{xhtml}
+libue{mod}: ../web/xhtml/libue{xhtml}
# Generated options parser.
#
if $cli.configured
{
- cli.cxx{options}: cli{options}
+ cli.cxx{module-options}: cli{module}
# Set option prefix to the empty value to handle all unknown request
# parameters uniformly with a single catch block.
#
- cli.options += --std c++11 -I $src_root --include-with-brackets \
---include-prefix mod --guard-prefix MOD --generate-specifier \
---cxx-prologue "#include <mod/types-parsers.hxx>" \
---cli-namespace brep::cli --generate-file-scanner --suppress-usage \
+ cli.options += --std c++11 -I $src_root --include-with-brackets \
+--include-prefix mod --guard-prefix MOD --generate-specifier \
+--cxx-prologue "#include <mod/types-parsers.hxx>" \
+--cli-namespace brep::cli --generate-file-scanner --option-length 46 \
--generate-modifier --generate-description --option-prefix ""
# Include the generated cli files into the distribution and don't remove
diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx
new file mode 100644
index 0000000..cb61e66
--- /dev/null
+++ b/mod/ci-common.cxx
@@ -0,0 +1,494 @@
+// file : mod/ci-common.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/ci-common.hxx>
+
+#include <libbutl/uuid.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-serializer.hxx>
+
+#include <mod/external-handler.hxx>
+
+namespace brep
+{
+ using namespace std;
+ using namespace butl;
+
+ void ci_start::
+ init (shared_ptr<options::ci_start> o)
+ {
+ // Verify the data directory satisfies the requirements.
+ //
+ const dir_path& d (o->ci_data ());
+
+ if (d.relative ())
+ throw runtime_error ("ci-data directory path must be absolute");
+
+ if (!dir_exists (d))
+ throw runtime_error ("ci-data directory '" + d.string () +
+ "' does not exist");
+
+ if (o->ci_handler_specified () && o->ci_handler ().relative ())
+ throw runtime_error ("ci-handler path must be absolute");
+
+ options_ = move (o);
+ }
+
+ optional<ci_start::start_result> ci_start::
+ start (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ optional<tenant_service>&& service,
+ const repository_location& repository,
+ const vector<package>& packages,
+ const optional<string>& client_ip,
+ const optional<string>& user_agent,
+ const optional<string>& interactive,
+ const optional<string>& simulate,
+ const vector<pair<string, string>>& custom_request,
+ const vector<pair<string, string>>& overrides)
+ {
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+
+ assert (options_ != nullptr); // Shouldn't be called otherwise.
+
+ // If the tenant service is specified, then its type may not be empty.
+ //
+ assert (!service || !service->type.empty ());
+
+ // Generate the request id.
+ //
+ // Note that it will also be used as a CI result manifest reference,
+ // unless the latter is provided by the external handler.
+ //
+ string request_id;
+
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return nullopt;
+ }
+
+ // Create the submission data directory.
+ //
+ dir_path dd (options_->ci_data () / dir_path (request_id));
+
+ try
+ {
+ // It's highly unlikely but still possible that the directory already
+ // exists. This can only happen if the generated uuid is not unique.
+ //
+ if (try_mkdir (dd) == mkdir_status::already_exists)
+ throw_generic_error (EEXIST);
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to create directory '" << dd << "': " << e;
+ return nullopt;
+ }
+
+ auto_rmdir ddr (dd);
+
+ // Return the start_result object for the client errors (normally the bad
+ // request status code (400) for the client data serialization errors).
+ //
+ auto client_error = [&request_id] (uint16_t status, string message)
+ {
+ return start_result {status,
+ move (message),
+ request_id,
+ vector<pair<string, string>> ()};
+ };
+
+ // Serialize the CI request manifest to a stream. On the serialization
+ // error return false together with the start_result object containing the
+ // bad request (400) code and the error message. On the stream error pass
+ // through the io_error exception. Otherwise return true.
+ //
+ timestamp ts (system_clock::now ());
+
+ auto rqm = [&request_id,
+ &ts,
+ &service,
+ &repository,
+ &packages,
+ &client_ip,
+ &user_agent,
+ &interactive,
+ &simulate,
+ &custom_request,
+ &client_error] (ostream& os, bool long_lines = false)
+ -> pair<bool, optional<start_result>>
+ {
+ try
+ {
+ serializer s (os, "request", long_lines);
+
+ // Serialize the submission manifest header.
+ //
+ s.next ("", "1"); // Start of manifest.
+ s.next ("id", request_id);
+ s.next ("repository", repository.string ());
+
+ for (const package& p: packages)
+ {
+ if (!p.version)
+ s.next ("package", p.name.string ());
+ else
+ s.next ("package",
+ p.name.string () + '/' + p.version->string ());
+ }
+
+ if (interactive)
+ s.next ("interactive", *interactive);
+
+ if (simulate)
+ s.next ("simulate", *simulate);
+
+ s.next ("timestamp",
+ butl::to_string (ts,
+ "%Y-%m-%dT%H:%M:%SZ",
+ false /* special */,
+ false /* local */));
+
+ if (client_ip)
+ s.next ("client-ip", *client_ip);
+
+ if (user_agent)
+ s.next ("user-agent", *user_agent);
+
+ if (service)
+ {
+ // Note that if the service id is not specified, then the handler
+ // will use the generated reference instead.
+ //
+ if (!service->id.empty ())
+ s.next ("service-id", service->id);
+
+ s.next ("service-type", service->type);
+
+ if (service->data)
+ s.next ("service-data", *service->data);
+ }
+
+ // Serialize the request custom parameters.
+ //
+        // Note that the serializer constrains the custom parameter names
+ // (can't start with '#', can't contain ':' and the whitespaces,
+ // etc).
+ //
+ for (const pair<string, string>& nv: custom_request)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ return make_pair (true, optional<start_result> ());
+ }
+ catch (const serialization& e)
+ {
+ return make_pair (false,
+ optional<start_result> (
+ client_error (400,
+ string ("invalid parameter: ") +
+ e.what ())));
+ }
+ };
+
+ // Serialize the CI request manifest to the submission directory.
+ //
+ path rqf (dd / "request.manifest");
+
+ try
+ {
+ ofdstream os (rqf);
+ pair<bool, optional<start_result>> r (rqm (os));
+ os.close ();
+
+ if (!r.first)
+ return move (*r.second);
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << rqf << "': " << e;
+ return nullopt;
+ }
+
+ // Serialize the CI overrides manifest to a stream. On the serialization
+ // error return false together with the start_result object containing the
+ // bad request (400) code and the error message. On the stream error pass
+ // through the io_error exception. Otherwise return true.
+ //
+ auto ovm = [&overrides, &client_error] (ostream& os,
+ bool long_lines = false)
+ -> pair<bool, optional<start_result>>
+ {
+ try
+ {
+ serializer s (os, "overrides", long_lines);
+
+ s.next ("", "1"); // Start of manifest.
+
+ for (const pair<string, string>& nv: overrides)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ return make_pair (true, optional<start_result> ());
+ }
+ catch (const serialization& e)
+ {
+ return make_pair (false,
+ optional<start_result> (
+ client_error (
+ 400,
+ string ("invalid manifest override: ") +
+ e.what ())));
+ }
+ };
+
+ // Serialize the CI overrides manifest to the submission directory.
+ //
+ path ovf (dd / "overrides.manifest");
+
+ if (!overrides.empty ())
+ try
+ {
+ ofdstream os (ovf);
+ pair<bool, optional<start_result>> r (ovm (os));
+ os.close ();
+
+ if (!r.first)
+ return move (*r.second);
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << ovf << "': " << e;
+ return nullopt;
+ }
+
+ // Given that the submission data is now successfully persisted we are no
+ // longer in charge of removing it, except for the cases when the
+ // submission handler terminates with an error (see below for details).
+ //
+ ddr.cancel ();
+
+ // If the handler terminates with non-zero exit status or specifies 5XX
+ // (HTTP server error) submission result manifest status value, then we
+ // stash the submission data directory for troubleshooting. Otherwise, if
+ // it's the 4XX (HTTP client error) status value, then we remove the
+ // directory.
+ //
+ auto stash_submit_dir = [&dd, error] ()
+ {
+ if (dir_exists (dd))
+ try
+ {
+ mvdir (dd, dir_path (dd + ".fail"));
+ }
+ catch (const system_error& e)
+ {
+ // Not much we can do here. Let's just log the issue and bail out
+ // leaving the directory in place.
+ //
+ error << "unable to rename directory '" << dd << "': " << e;
+ }
+ };
+
+ // Run the submission handler, if specified, reading the CI result
+ // manifest from its stdout and parse it into the resulting manifest
+ // object. Otherwise, create implied CI result manifest.
+ //
+ start_result sr;
+
+ if (options_->ci_handler_specified ())
+ {
+ using namespace external_handler;
+
+ optional<result_manifest> r (run (options_->ci_handler (),
+ options_->ci_handler_argument (),
+ dd,
+ options_->ci_handler_timeout (),
+ error,
+ warn,
+ trace));
+ if (!r)
+ {
+ stash_submit_dir ();
+ return nullopt; // The diagnostics is already issued.
+ }
+
+ sr.status = r->status;
+
+ for (manifest_name_value& nv: r->values)
+ {
+ string& n (nv.name);
+ string& v (nv.value);
+
+ if (n == "message")
+ sr.message = move (v);
+ else if (n == "reference")
+ sr.reference = move (v);
+ else if (n != "status")
+ sr.custom_result.emplace_back (move (n), move (v));
+ }
+
+ if (sr.reference.empty ())
+ sr.reference = move (request_id);
+ }
+ else // Create the implied CI result manifest.
+ {
+ sr.status = 200;
+ sr.message = "CI request is queued";
+ sr.reference = move (request_id);
+ }
+
+    // Serialize the CI result manifest to a stream. On the
+ // serialization error log the error description and return false, on the
+ // stream error pass through the io_error exception, otherwise return
+ // true.
+ //
+ auto rsm = [&sr, &error] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serialize_manifest (sr, os, long_lines);
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ error << "ref " << sr.reference << ": unable to serialize handler's "
+ << "output: " << e;
+ return false;
+ }
+ };
+
+ // If the submission data directory still exists then perform an
+ // appropriate action on it, depending on the submission result status.
+ // Note that the handler could move or remove the directory.
+ //
+ if (dir_exists (dd))
+ {
+ // Remove the directory if the client error is detected.
+ //
+ if (sr.status >= 400 && sr.status < 500)
+ {
+ rmdir_r (dd);
+ }
+ //
+      // Otherwise, save the result manifest into the directory. Also stash
+ // the directory for troubleshooting in case of the server error.
+ //
+ else
+ {
+ path rsf (dd / "result.manifest");
+
+ try
+ {
+ ofdstream os (rsf);
+
+ // Not being able to stash the result manifest is not a reason to
+ // claim the submission failed. The error is logged nevertheless.
+ //
+ rsm (os);
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ // Not fatal (see above).
+ //
+ error << "unable to write to '" << rsf << "': " << e;
+ }
+
+ if (sr.status >= 500 && sr.status < 600)
+ stash_submit_dir ();
+ }
+ }
+
+ // Send email, if configured, and the CI request submission is not
+ // simulated. Use the long lines manifest serialization mode for the
+ // convenience of copying/clicking URLs they contain.
+ //
+ // Note that we don't consider the email sending failure to be a
+ // submission failure as the submission data is successfully persisted and
+ // the handler is successfully executed, if configured. One can argue that
+ // email can be essential for the submission processing and missing it
+ // would result in the incomplete submission. In this case it's natural to
+ // assume that the web server error log is monitored and the email sending
+ // failure will be noticed.
+ //
+ if (options_->ci_email_specified () && !simulate)
+ try
+ {
+ // Redirect the diagnostics to the web server error log.
+ //
+ sendmail sm ([trace] (const char* args[], size_t n)
+ {
+ if (trace != nullptr)
+ *trace << process_args {args, n};
+ },
+ 2 /* stderr */,
+ options_->email (),
+ "CI request submission (" + sr.reference + ')',
+ {options_->ci_email ()});
+
+ // Write the CI request manifest.
+ //
+ pair<bool, optional<start_result>> r (
+ rqm (sm.out, true /* long_lines */));
+
+ assert (r.first); // The serialization succeeded once, so can't fail now.
+
+ // Write the CI overrides manifest.
+ //
+ sm.out << "\n\n";
+
+ r = ovm (sm.out, true /* long_lines */);
+ assert (r.first); // The serialization succeeded once, so can't fail now.
+
+ // Write the CI result manifest.
+ //
+ sm.out << "\n\n";
+
+ // We don't care about the result (see above).
+ //
+ rsm (sm.out, true /* long_lines */);
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+
+ return optional<start_result> (move (sr));
+ }
+
+ void ci_start::
+ serialize_manifest (const start_result& r, ostream& os, bool long_lines)
+ {
+ manifest_serializer s (os, "result", long_lines);
+
+ s.next ("", "1"); // Start of manifest.
+ s.next ("status", to_string (r.status));
+ s.next ("message", r.message);
+ s.next ("reference", r.reference);
+
+ for (const pair<string, string>& nv: r.custom_result)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ }
+}
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
new file mode 100644
index 0000000..6f62c4b
--- /dev/null
+++ b/mod/ci-common.hxx
@@ -0,0 +1,96 @@
+// file : mod/ci-common.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_CI_COMMON_HXX
+#define MOD_CI_COMMON_HXX
+
+#include <odb/forward.hxx> // database
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/common.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
+
+namespace brep
+{
+ class ci_start
+ {
+ public:
+ void
+ init (shared_ptr<options::ci_start>);
+
+ // If the request handling has been performed normally, then return the
+ // information that corresponds to the CI result manifest (see CI Result
+  // Manifest in the manual). Otherwise (some internal error has occurred),
+ // log the error and return nullopt.
+ //
+ // The arguments correspond to the CI request and overrides manifest
+ // values (see CI Request and Overrides Manifests in the manual). Note:
+ // request id and timestamp are generated by the implementation.
+ //
+ struct package
+ {
+ package_name name;
+ optional<brep::version> version;
+ };
+ // Note that the inability to generate the reference is an internal
+ // error. Thus, it is not optional.
+ //
+ struct start_result
+ {
+ uint16_t status;
+ string message;
+ string reference;
+ vector<pair<string, string>> custom_result;
+ };
+
+ // In the optional service information, if id is empty, then the generated
+ // reference is used instead.
+ //
+ optional<start_result>
+ start (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ optional<tenant_service>&&,
+ const repository_location& repository,
+ const vector<package>& packages,
+ const optional<string>& client_ip,
+ const optional<string>& user_agent,
+ const optional<string>& interactive = nullopt,
+ const optional<string>& simulate = nullopt,
+ const vector<pair<string, string>>& custom_request = {},
+ const vector<pair<string, string>>& overrides = {});
+
+ // Helpers.
+ //
+
+ // Serialize the start result as a CI result manifest.
+ //
+ static void
+ serialize_manifest (const start_result&, ostream&, bool long_lines = false);
+
+ private:
+ shared_ptr<options::ci_start> options_;
+ };
+
+ class ci_cancel
+ {
+ public:
+ void
+ init (shared_ptr<options::ci_cancel>, shared_ptr<odb::core::database>);
+
+ // @@ TODO Archive the tenant.
+ //
+ void
+ cancel (/*...*/);
+
+ private:
+ shared_ptr<options::ci_cancel> options_;
+ shared_ptr<odb::core::database> build_db_;
+ };
+}
+
+#endif // MOD_CI_COMMON_HXX
diff --git a/mod/database-module.cxx b/mod/database-module.cxx
index 5f20c01..07babc6 100644
--- a/mod/database-module.cxx
+++ b/mod/database-module.cxx
@@ -1,16 +1,22 @@
// file : mod/database-module.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/database-module.hxx>
+#include <odb/database.hxx>
#include <odb/exceptions.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
-#include <mod/options.hxx>
#include <mod/database.hxx>
+#include <mod/module-options.hxx>
namespace brep
{
+ using namespace odb::core;
+
// While currently the user-defined copy constructor is not required (we
// don't need to deep copy nullptr's), it is a good idea to keep the
// placeholder ready for less trivial cases.
@@ -69,4 +75,53 @@ namespace brep
throw;
}
+
+ void database_module::
+ update_tenant_service_state (
+ const connection_ptr& conn,
+ const string& tid,
+ const function<optional<string> (const tenant_service&)>& f)
+ {
+ assert (f != nullptr); // Shouldn't be called otherwise.
+
+ // Must be initialized via the init(options::build_db) function call.
+ //
+ assert (build_db_ != nullptr);
+
+ for (size_t retry (retry_);; )
+ {
+ try
+ {
+ transaction tr (conn->begin ());
+
+ shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tid));
+
+ if (t != nullptr && t->service)
+ {
+ tenant_service& s (*t->service);
+
+ if (optional<string> data = f (s))
+ {
+ s.data = move (*data);
+ build_db_->update (t);
+ }
+ }
+
+ tr.commit ();
+
+ // Bail out if we have successfully updated the service state.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ if (retry-- == 0)
+ throw;
+
+ HANDLER_DIAG;
+ l1 ([&]{trace << e << "; " << retry + 1 << " tenant service "
+ << "state update retries left";});
+ }
+ }
+ }
}
diff --git a/mod/database-module.hxx b/mod/database-module.hxx
index 06fc496..910cb35 100644
--- a/mod/database-module.hxx
+++ b/mod/database-module.hxx
@@ -1,20 +1,21 @@
// file : mod/database-module.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_DATABASE_MODULE_HXX
#define MOD_DATABASE_MODULE_HXX
-#include <odb/forward.hxx> // database
+#include <odb/forward.hxx> // odb::core::database, odb::core::connection_ptr
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
namespace brep
{
+ struct tenant_service;
+
// A handler that utilises the database. Specifically, it will retry the
// request in the face of recoverable database failures (deadlock, loss of
// connection, etc) up to a certain number of times.
@@ -51,6 +52,25 @@ namespace brep
virtual bool
handle (request&, response&) = 0;
+ // Helpers.
+ //
+
+ // Update the tenant-associated service state if the specified
+  // notification callback-returned function (expected to be non-NULL)
+ // returns the new state data.
+ //
+ // Specifically, start the database transaction, query the service state,
+ // and call the callback-returned function on this state. If this call
+ // returns the data string (rather than nullopt), then update the service
+ // state with this data and persist the change. Repeat all the above steps
+ // on the recoverable database failures (deadlocks, etc).
+ //
+ void
+ update_tenant_service_state (
+ const odb::core::connection_ptr&,
+ const string& tid,
+ const function<optional<string> (const tenant_service&)>&);
+
protected:
size_t retry_ = 0; // Max of all retries.
diff --git a/mod/database.cxx b/mod/database.cxx
index 3a3f793..02d521d 100644
--- a/mod/database.cxx
+++ b/mod/database.cxx
@@ -1,5 +1,4 @@
// file : mod/database.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/database.hxx>
@@ -25,10 +24,10 @@ namespace brep
operator< (const db_key& x, const db_key& y)
{
int r;
- if ((r = x.user.compare (y.user)) != 0 ||
- (r = x.role.compare (y.role)) != 0 ||
+ if ((r = x.user.compare (y.user)) != 0 ||
+ (r = x.role.compare (y.role)) != 0 ||
(r = x.password.compare (y.password)) != 0 ||
- (r = x.name.compare (y.name)) != 0 ||
+ (r = x.name.compare (y.name)) != 0 ||
(r = x.host.compare (y.host)))
return r < 0;
@@ -60,7 +59,7 @@ namespace brep
// Change the connection current user to the execution user name.
//
if (!role_.empty ())
- conn->execute ("SET ROLE '" + role_ + "'");
+ conn->execute ("SET ROLE '" + role_ + '\'');
return conn;
}
diff --git a/mod/database.hxx b/mod/database.hxx
index 2006e35..ff61433 100644
--- a/mod/database.hxx
+++ b/mod/database.hxx
@@ -1,5 +1,4 @@
// file : mod/database.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_DATABASE_HXX
diff --git a/mod/diagnostics.cxx b/mod/diagnostics.cxx
index 0a2609f..fac251f 100644
--- a/mod/diagnostics.cxx
+++ b/mod/diagnostics.cxx
@@ -1,5 +1,4 @@
// file : mod/diagnostics.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/diagnostics.hxx>
diff --git a/mod/diagnostics.hxx b/mod/diagnostics.hxx
index 15fe7b2..f83e1de 100644
--- a/mod/diagnostics.hxx
+++ b/mod/diagnostics.hxx
@@ -1,5 +1,4 @@
// file : mod/diagnostics.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_DIAGNOSTICS_HXX
@@ -110,7 +109,7 @@ namespace brep
uncaught_ (r.uncaught_),
#endif
data_ (move (r.data_)),
- os_ (move (r.os_)),
+ os_ (move (r.os_)), // Note: can throw.
epilogue_ (r.epilogue_)
{
r.data_.clear (); // Empty.
diff --git a/mod/external-handler.cxx b/mod/external-handler.cxx
index 4237439..3a85bd8 100644
--- a/mod/external-handler.cxx
+++ b/mod/external-handler.cxx
@@ -1,5 +1,4 @@
// file : mod/external-handler.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/external-handler.hxx>
@@ -14,9 +13,10 @@
#include <type_traits> // static_assert
#include <system_error> // error_code, generic_category()
-#include <libbutl/process.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
+#include <libbutl/process.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-parser.hxx>
using namespace std;
using namespace butl;
@@ -96,15 +96,18 @@ namespace brep
data_dir));
pipe.out.close ();
+ // Kill the process and wait for its completion.
+ //
auto kill = [&pr, &warn, &handler, &ref] ()
- {
- // We may still end up well (see below), thus this is a warning.
- //
- warn << "ref " << ref << ": process " << handler
- << " execution timeout expired";
+ {
+ // We may still end up well (see below), thus this is a warning.
+ //
+ warn << "ref " << ref << ": process " << handler
+ << " execution timeout expired";
- pr.kill ();
- };
+ pr.kill ();
+ pr.wait ();
+ };
try
{
@@ -127,7 +130,7 @@ namespace brep
}
timeval tm {wd.count () / 1000 /* seconds */,
- wd.count () % 1000 * 1000 /* microseconds */};
+ wd.count () % 1000 * 1000 /* microseconds */};
fd_set rd;
FD_ZERO (&rd);
@@ -314,7 +317,7 @@ namespace brep
assert (e != nullptr);
if (!(*e == '\0' && c >= 100 && c < 600))
- bad_value ("invalid HTTP status '" + v + "'");
+ bad_value ("invalid HTTP status '" + v + '\'');
// Save the HTTP status.
//
diff --git a/mod/external-handler.hxx b/mod/external-handler.hxx
index 5a1d731..0276a25 100644
--- a/mod/external-handler.hxx
+++ b/mod/external-handler.hxx
@@ -1,11 +1,10 @@
// file : mod/external-handler.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_EXTERNAL_HANDLER_HXX
#define MOD_EXTERNAL_HANDLER_HXX
-#include <libbutl/manifest-parser.mxx>
+#include <libbutl/manifest-types.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx
index 0218a5f..9282544 100644
--- a/mod/mod-build-configs.cxx
+++ b/mod/mod-build-configs.cxx
@@ -1,21 +1,18 @@
// file : mod/mod-build-configs.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-build-configs.hxx>
-#include <algorithm> // replace()
-
#include <libstudxml/serializer.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
// While currently the user-defined copy constructor is not required (we don't
@@ -40,6 +37,9 @@ init (scanner& s)
if (options_->build_config_specified ())
build_config_module::init (*options_);
+
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
}
bool brep::build_configs::
@@ -49,7 +49,7 @@ handle (request& rq, response& rs)
HANDLER_DIAG;
- if (build_conf_ == nullptr)
+ if (target_conf_ == nullptr)
throw invalid_request (501, "not implemented");
const size_t page_configs (options_->build_config_page_entries ());
@@ -57,6 +57,8 @@ handle (request& rq, response& rs)
params::build_configs params;
+ string& selected_class (params.class_name ()); // Note: can be empty.
+
try
{
name_value_scanner s (rq.parameters (1024));
@@ -67,8 +69,7 @@ handle (request& rq, response& rs)
// character (that is otherwise forbidden in a class name) to the plus
// character.
//
- string& cn (params.class_name ());
- replace (cn.begin (), cn.end (), ' ', '+');
+ replace (selected_class.begin (), selected_class.end (), ' ', '+');
}
catch (const cli::exception& e)
{
@@ -89,11 +90,11 @@ handle (request& rq, response& rs)
<< DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
<< DIV(ID="content");
- auto url = [&root] (const string& cls)
+ auto url = [&root, this] (const string& cls)
{
- string r (root.string () + "?build-configs");
+ string r (tenant_dir (root, tenant).string () + "?build-configs");
- if (cls != "all")
+ if (!cls.empty ())
{
r += '=';
@@ -120,34 +121,44 @@ handle (request& rq, response& rs)
//
if (params.page () == 0)
{
- const strings& cls (build_conf_->classes);
- const map<string, string>& im (build_conf_->class_inheritance_map);
+ const strings& cls (target_conf_->classes);
+ const map<string, string>& im (target_conf_->class_inheritance_map);
s << DIV(ID="filter-heading") << "Build Configuration Classes" << ~DIV
<< P(ID="filter");
for (auto b (cls.begin ()), i (b), e (cls.end ()); i != e; ++i)
{
- if (i != b)
- s << ' ';
-
+ // Skip the 'hidden' class.
+ //
const string& c (*i);
- print_class_name (c, c == params.class_name ());
- // Append the base class, if present.
- //
- auto j (im.find (c));
- if (j != im.end ())
+ if (c != "hidden")
{
- s << ':';
- print_class_name (j->second);
+ // Note that here we rely on the fact that the first class in the list
+ // can never be 'hidden' (is always 'all').
+ //
+ if (i != b)
+ s << ' ';
+
+ print_class_name (c, c == selected_class);
+
+ // Append the base class, if present.
+ //
+ auto j (im.find (c));
+ if (j != im.end ())
+ {
+ s << ':';
+ print_class_name (j->second);
+ }
}
}
s << ~P;
}
- // Print build configurations that belong to the selected class.
+ // Print build configurations that belong to the selected class (all
+ // configurations if no class is selected) and are not hidden.
//
// We will calculate the total configuration count and cache configurations
// for printing (skipping an appropriate number of them for page number
@@ -155,14 +166,15 @@ handle (request& rq, response& rs)
// before printing the configurations.
//
size_t count (0);
- vector<const build_config*> configs;
+ vector<const build_target_config*> configs;
configs.reserve (page_configs);
size_t skip (page * page_configs);
size_t print (page_configs);
- for (const build_config& c: *build_conf_)
+ for (const build_target_config& c: *target_conf_)
{
- if (belongs (c, params.class_name ()))
+ if ((selected_class.empty () || belongs (c, selected_class)) &&
+ !belongs (c, "hidden"))
{
if (skip != 0)
--skip;
@@ -185,7 +197,7 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- for (const build_config* c: configs)
+ for (const build_target_config* c: configs)
{
s << TABLE(CLASS="proplist config")
<< TBODY
@@ -217,7 +229,7 @@ handle (request& rq, response& rs)
count,
page_configs,
options_->build_config_pages (),
- url (params.class_name ()))
+ url (selected_class))
<< ~DIV
<< ~BODY
<< ~HTML;
diff --git a/mod/mod-build-configs.hxx b/mod/mod-build-configs.hxx
index a8354e6..562ac6d 100644
--- a/mod/mod-build-configs.hxx
+++ b/mod/mod-build-configs.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-configs.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILD_CONFIGS_HXX
@@ -9,7 +8,7 @@
#include <libbrep/utility.hxx>
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/build-config-module.hxx>
namespace brep
diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx
index 72c5fdf..bdae356 100644
--- a/mod/mod-build-force.cxx
+++ b/mod/mod-build-force.cxx
@@ -1,35 +1,41 @@
// file : mod/mod-build-force.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-build-force.hxx>
-#include <algorithm> // replace()
-
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+brep::build_force::
+build_force (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_force::
-build_force (const build_force& r)
+build_force (const build_force& r, const tenant_service_map& tsm)
: database_module (r),
build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -116,10 +122,26 @@ handle (request& rq, response& rs)
version package_version (parse_version (params.version (),
"package version"));
- string& config (params.configuration ());
+ target_triplet target;
+
+ try
+ {
+ target = target_triplet (params.target ());
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ string& target_config (params.target_config ());
+
+ if (target_config.empty ())
+ throw invalid_argument ("no target configuration name");
- if (config.empty ())
- throw invalid_argument ("no configuration name");
+ string& package_config (params.package_config ());
+
+ if (package_config.empty ())
+ throw invalid_argument ("no package configuration name");
string& toolchain_name (params.toolchain_name ());
@@ -130,7 +152,9 @@ handle (request& rq, response& rs)
"toolchain version"));
id = build_id (package_id (move (tenant), move (p), package_version),
- move (config),
+ move (target),
+ move (target_config),
+ move (package_config),
move (toolchain_name),
toolchain_version);
}
@@ -150,42 +174,137 @@ handle (request& rq, response& rs)
// Make sure the build configuration still exists.
//
- if (build_conf_map_->find (id.configuration.c_str ()) ==
- build_conf_map_->end ())
- config_expired ("no configuration");
+ if (target_conf_map_->find (
+ build_target_config_id {id.target,
+ id.target_config_name}) ==
+ target_conf_map_->end ())
+ config_expired ("no target configuration");
// Load the package build configuration (if present), set the force flag and
// update the object's persistent state.
//
+ // If the incomplete package build is being forced to rebuild and the
+ // tenant_service_build_queued callback is associated with the package
+ // tenant, then stash the state, the build object, and the callback pointer
+ // and calculate the hints for the subsequent service `queued` notification.
+ //
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ tenant_service_build_queued::build_queued_hints qhs;
+
+ connection_ptr conn (build_db_->connection ());
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
package_build pb;
+ shared_ptr<build> b;
+
if (!build_db_->query_one<package_build> (
- query<package_build>::build::id == id, pb))
+ query<package_build>::build::id == id, pb) ||
+ (b = move (pb.build))->state == build_state::queued)
config_expired ("no package build");
- shared_ptr<build> b (pb.build);
force_state force (b->state == build_state::built
? force_state::forced
: force_state::forcing);
if (b->force != force)
{
+ // Log the force rebuild with the warning severity, truncating the
+ // reason if too long.
+ //
+ diag_record dr (warn);
+ dr << "force rebuild for ";
+
+ if (!b->tenant.empty ())
+ dr << b->tenant << ' ';
+
+ dr << b->package_name << '/' << b->package_version << ' '
+ << b->target_config_name << '/' << b->target << ' '
+ << b->package_config_name << ' '
+ << b->toolchain_name << '-' << b->toolchain_version
+ << " (state: " << to_string (b->state) << ' ' << to_string (b->force)
+ << "): ";
+
+ if (reason.size () < 50)
+ dr << reason;
+ else
+ dr << string (reason, 0, 50) << "...";
+
b->force = force;
build_db_->update (b);
- l1 ([&]{trace << "force rebuild for "
- << b->tenant << ' '
- << b->package_name << '/' << b->package_version << ' '
- << b->configuration << ' '
- << b->toolchain_name << '-' << b->toolchain_version
- << ": " << reason;});
+ if (force == force_state::forcing)
+ {
+ shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant));
+
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsq = dynamic_cast<const tenant_service_build_queued*> (
+ i->second.get ());
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued() callback, then also
+ // set the package tenant's queued timestamp to the current time
+ // to prevent the notifications race (see tenant::queued_timestamp
+ // for details).
+ //
+ if (tsq != nullptr)
+ {
+ // Calculate the tenant service hints.
+ //
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == t->id));
+
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ qhs = tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p->configs.size () == 1};
+
+ // Set the package tenant's queued timestamp.
+ //
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+
+ tss = make_pair (move (*t->service), move (b));
+ }
+ }
+ }
+ }
}
t.commit ();
}
+ // If the incomplete package build is being forced to rebuild and the
+ // tenant-associated third-party service needs to be notified about the
+ // queued builds, then call the tenant_service_build_queued::build_queued()
+ // callback function and update the service state, if requested.
+ //
+ if (tsq != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ build& b (*tss->second);
+
+ vector<build> qbs;
+ qbs.push_back (move (b));
+
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ build_state::building,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
+
// We have all the data, so don't buffer the response content.
//
ostream& os (rs.content (200, "text/plain;charset=utf-8", false));
diff --git a/mod/mod-build-force.hxx b/mod/mod-build-force.hxx
index afae53b..ea9c141 100644
--- a/mod/mod-build-force.hxx
+++ b/mod/mod-build-force.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-force.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILD_FORCE_HXX
@@ -8,7 +7,8 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
@@ -17,13 +17,13 @@ namespace brep
class build_force: public database_module, private build_config_module
{
public:
- build_force () = default;
+ explicit
+ build_force (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_force (const build_force&);
+ build_force (const build_force&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -40,6 +40,7 @@ namespace brep
private:
shared_ptr<options::build_force> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx
index a6e6730..c8e803b 100644
--- a/mod/mod-build-log.cxx
+++ b/mod/mod-build-log.cxx
@@ -1,25 +1,21 @@
// file : mod/mod-build-log.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-build-log.hxx>
-#include <algorithm> // find_if()
-
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_stream()
+#include <libbutl/timestamp.hxx> // to_stream()
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
@@ -69,7 +65,7 @@ handle (request& rq, response& rs)
//
// Note that the URL path must be in the following form:
//
- // <pkg-name>/<pkg-version>/log/<cfg-name>/<toolchain-name>/<toolchain-version>[/<operation>]
+ // <pkg-name>/<pkg-version>/log/<cfg-name>/<target>/<toolchain-name>/<toolchain-version>[/<operation>]
//
// Also note that the presence of the first 3 components is guaranteed by
// the repository_root module.
@@ -125,12 +121,33 @@ handle (request& rq, response& rs)
assert (i != lpath.end () && *i == "log");
if (++i == lpath.end ())
- throw invalid_argument ("no configuration name");
+ throw invalid_argument ("no target");
+
+ target_triplet target;
+ try
+ {
+ target = target_triplet (*i++);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ if (i == lpath.end ())
+ throw invalid_argument ("no target configuration name");
- string config (*i++);
+ string target_config (*i++);
- if (config.empty ())
- throw invalid_argument ("empty configuration name");
+ if (target_config.empty ())
+ throw invalid_argument ("empty target configuration name");
+
+ if (i == lpath.end ())
+ throw invalid_argument ("no package configuration name");
+
+ string package_config (*i++);
+
+ if (package_config.empty ())
+ throw invalid_argument ("empty package configuration name");
if (i == lpath.end ())
throw invalid_argument ("no toolchain name");
@@ -146,7 +163,9 @@ handle (request& rq, response& rs)
version toolchain_version (parse_version (*i++, "toolchain version"));
id = build_id (package_id (tenant, move (name), package_version),
- move (config),
+ move (target),
+ move (target_config),
+ move (package_config),
move (toolchain_name),
toolchain_version);
@@ -183,7 +202,7 @@ handle (request& rq, response& rs)
auto config_expired = [&trace, &lpath, this] (const string& d)
{
l2 ([&]{trace << "package build configuration for " << lpath
- << (!tenant.empty () ? "(" + tenant + ")" : "")
+ << (!tenant.empty () ? '(' + tenant + ')' : "")
<< " expired: " << d;});
throw invalid_request (404, "package build configuration expired: " + d);
@@ -191,9 +210,11 @@ handle (request& rq, response& rs)
// Make sure the build configuration still exists.
//
- if (build_conf_map_->find (id.configuration.c_str ()) ==
- build_conf_map_->end ())
- config_expired ("no configuration");
+ if (target_conf_map_->find (
+ build_target_config_id {id.target,
+ id.target_config_name}) ==
+ target_conf_map_->end ())
+ config_expired ("no target configuration");
// Load the package build configuration (if present).
//
@@ -206,11 +227,16 @@ handle (request& rq, response& rs)
query<package_build>::build::id == id, pb))
config_expired ("no package build");
- b = pb.build;
+ b = move (pb.build);
if (b->state != build_state::built)
+ {
config_expired ("state is " + to_string (b->state));
+ }
else
+ {
build_db_->load (*b, b->results_section);
+ build_db_->load (*b, b->auxiliary_machines_section);
+ }
t.commit ();
}
@@ -229,15 +255,20 @@ handle (request& rq, response& rs)
if (!b->tenant.empty ())
os << options_->tenant_name () << ": " << b->tenant << endl << endl;
- os << "package: " << b->package_name << endl
- << "version: " << b->package_version << endl
- << "toolchain: " << b->toolchain_name << '-' << b->toolchain_version
- << endl
- << "config: " << b->configuration << endl
- << "machine: " << b->machine << " (" << b->machine_summary << ")"
- << endl
- << "target: " << b->target.string () << endl
- << "timestamp: ";
+ os << "package: " << b->package_name << endl
+ << "version: " << b->package_version << endl
+ << "toolchain: " << b->toolchain_name << '-'
+ << b->toolchain_version << endl
+ << "target: " << b->target << endl
+ << "target config: " << b->target_config_name << endl
+ << "package config: " << b->package_config_name << endl
+ << "build machine: " << b->machine.name << " -- "
+ << b->machine.summary << endl;
+
+ for (const build_machine& m: b->auxiliary_machines)
+ os << "auxiliary machine: " << m.name << " -- " << m.summary << endl;
+
+ os << "timestamp: ";
butl::to_stream (os,
b->timestamp,
diff --git a/mod/mod-build-log.hxx b/mod/mod-build-log.hxx
index 2a5812c..a2f4e48 100644
--- a/mod/mod-build-log.hxx
+++ b/mod/mod-build-log.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-log.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILD_LOG_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index 860a964..ccce17f 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-result.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-build-result.hxx>
@@ -7,24 +6,21 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/openssl.mxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/process-io.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <libbbot/manifest.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
-#include <libbrep/package.hxx>
-#include <libbrep/package-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
-#include <mod/build.hxx> // *_url()
-#include <mod/options.hxx>
+#include <mod/build.hxx> // send_notification_email()
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
using namespace std;
using namespace butl;
@@ -32,15 +28,21 @@ using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+brep::build_result::
+build_result (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_result::
-build_result (const build_result& r)
- : database_module (r),
- build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+build_result (const build_result& r, const tenant_service_map& tsm)
+ : build_result_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -52,16 +54,8 @@ init (scanner& s)
options_ = make_shared<options::build_result> (
s, unknown_mode::fail, unknown_mode::fail);
- database_module::init (static_cast<const options::package_db&> (*options_),
- options_->package_db_retry ());
-
if (options_->build_config_specified ())
- {
- database_module::init (static_cast<const options::build_db&> (*options_),
- options_->build_db_retry ());
-
- build_config_module::init (*options_);
- }
+ build_result_module::init (*options_, *options_);
if (options_->root ().empty ())
options_->root (dir_path ("/"));
@@ -109,120 +103,23 @@ handle (request& rq, response&)
throw invalid_request (400, e.what ());
}
- // Parse the task response session to obtain the build id and the timestamp,
- // and to make sure the session matches tenant and the result manifest's
- // package name, and version.
+ // Parse the task response session and make sure the session matches tenant
+ // and the result manifest's package name, and version.
//
- build_id id;
- timestamp session_timestamp;
+ parse_session_result session;
+ const build_id& id (session.id);
try
{
- const string& s (rqm.session);
-
- size_t p (s.find ('/')); // End of tenant.
-
- if (p == string::npos)
- throw invalid_argument ("no package name");
-
- if (tenant.compare (0, tenant.size (), s, 0, p) != 0)
- throw invalid_argument ("tenant mismatch");
-
- size_t b (p + 1); // Start of package name.
- p = s.find ('/', b); // End of package name.
-
- if (p == b)
- throw invalid_argument ("empty package name");
-
- if (p == string::npos)
- throw invalid_argument ("no package version");
-
- package_name& name (rqm.result.name);
- {
- const string& n (name.string ());
- if (n.compare (0, n.size (), s, b, p - b) != 0)
- throw invalid_argument ("package name mismatch");
- }
-
- b = p + 1; // Start of version.
- p = s.find ('/', b); // End of version.
-
- if (p == string::npos)
- throw invalid_argument ("no configuration name");
-
- auto parse_version = [&s, &b, &p] (const char* what) -> version
- {
- // Intercept exception handling to add the parsing error attribution.
- //
- try
- {
- return brep::version (string (s, b, p - b));
- }
- catch (const invalid_argument& e)
- {
- throw invalid_argument (string ("invalid ") + what + ": " + e.what ());
- }
- };
+ // Note: also verifies that the tenant matches the session.
+ //
+ session = parse_session (rqm.session);
- version package_version (parse_version ("package version"));
+ if (rqm.result.name != id.package.name)
+ throw invalid_argument ("package name mismatch");
- if (package_version != rqm.result.version)
+ if (rqm.result.version != session.package_version)
throw invalid_argument ("package version mismatch");
-
- b = p + 1; // Start of configuration name.
- p = s.find ('/', b); // End of configuration name.
-
- if (p == string::npos)
- throw invalid_argument ("no toolchain name");
-
- string config (s, b, p - b);
-
- if (config.empty ())
- throw invalid_argument ("empty configuration name");
-
- b = p + 1; // Start of toolchain name.
- p = s.find ('/', b); // End of toolchain name.
-
- if (p == string::npos)
- throw invalid_argument ("no toolchain version");
-
- string toolchain_name (s, b, p - b);
-
- if (toolchain_name.empty ())
- throw invalid_argument ("empty toolchain name");
-
- b = p + 1; // Start of toolchain version.
- p = s.find ('/', b); // End of toolchain version.
-
- if (p == string::npos)
- throw invalid_argument ("no timestamp");
-
- version toolchain_version (parse_version ("toolchain version"));
-
- id = build_id (package_id (move (tenant), move (name), package_version),
- move (config),
- move (toolchain_name),
- toolchain_version);
-
- try
- {
- size_t tsn;
- string ts (s, p + 1);
-
- session_timestamp = timestamp (
- chrono::duration_cast<timestamp::duration> (
- chrono::nanoseconds (stoull (ts, &tsn))));
-
- if (tsn != ts.size ())
- throw invalid_argument ("trailing junk");
- }
- // Handle invalid_argument or out_of_range (both derive from logic_error),
- // that can be thrown by stoull().
- //
- catch (const logic_error& e)
- {
- throw invalid_argument (string ("invalid timestamp: ") + e.what ());
- }
}
catch (const invalid_argument& e)
{
@@ -234,52 +131,42 @@ handle (request& rq, response&)
// if the session is valid. The thinking is that this is a problem with the
// controller's setup (expires too fast), not with the agent's.
//
- auto warn_expired = [&rqm, &warn] (const string& d)
+ // Note, though, that there can be quite a common situation when a build
+ // machine is suspended by the bbot agent due to the build timeout. In this
+ // case the task result request may arrive anytime later (after the issue is
+ // investigated, etc) with the abort or abnormal status. By that arrival
+ // time a new build task may already be issued/completed for this package
+ // build configuration or this configuration may even be gone (brep has been
+ // reconfigured, package has gone, etc). We will log no warning in this
+ // case, assuming that such an expiration is not a problem with the
+ // controller's setup.
+ //
+ shared_ptr<build> b;
+ result_status rs (rqm.result.status);
+
+ auto warn_expired = [&rqm, &warn, &b, &session, rs] (const string& d)
{
- warn << "session '" << rqm.session << "' expired: " << d;
+ if (!((b == nullptr || b->timestamp > session.timestamp) &&
+ (rs == result_status::abort || rs == result_status::abnormal)))
+ warn << "session '" << rqm.session << "' expired: " << d;
};
// Make sure the build configuration still exists.
//
- const bbot::build_config* cfg;
+ const build_target_config* tc;
{
- auto i (build_conf_map_->find (id.configuration.c_str ()));
+ auto i (target_conf_map_->find (
+ build_target_config_id {id.target, id.target_config_name}));
- if (i == build_conf_map_->end ())
+ if (i == target_conf_map_->end ())
{
warn_expired ("no build configuration");
return true;
}
- cfg = i->second;
- }
-
- // Load the built package (if present).
- //
- // The only way not to deal with 2 databases simultaneously is to pull
- // another bunch of the package fields into the build_package foreign
- // object, which is a pain (see build_package.hxx for details). Doesn't seem
- // worth it here: email members are really secondary and we don't need to
- // switch transactions back and forth.
- //
- shared_ptr<package> pkg;
- {
- transaction t (package_db_->begin ());
- pkg = package_db_->find<package> (id.package);
- t.commit ();
- }
-
- if (pkg == nullptr)
- {
- warn_expired ("no package");
- return true;
+ tc = i->second;
}
- auto print_args = [&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- };
-
// Load and update the package build configuration (if present).
//
// NULL if the package build doesn't exist or is not updated for any reason
@@ -288,240 +175,389 @@ handle (request& rq, response&)
//
shared_ptr<build> bld;
- optional<result_status> prev_status;
+ // The built package configuration.
+ //
+ // Not NULL if bld is not NULL.
+ //
+ shared_ptr<build_package> pkg;
+ build_package_config* cfg (nullptr);
+
+ // Don't send email to the build-email address for the success-to-success
+ // status change, unless the build was forced.
+ //
bool build_notify (false);
bool unforced (true);
+ // If the package is built (result status differs from interrupt, etc) and
+ // the package tenant has a third-party service state associated with it,
+ // then check if the tenant_service_build_built callback is registered for
+ // the type of the associated service. If it is, then stash the state, the
+ // build object, and the callback pointer for the subsequent service `built`
+ // notification. Note that we send this notification for the skip result as
+ // well, since it is semantically equivalent to the previous build result
+ // with the actual build process being optimized out.
+ //
+ // If the package build is interrupted and the tenant_service_build_queued
+ // callback is associated with the package tenant, then stash the state, the
+ // build object, and the callback pointer and calculate the hints for the
+ // subsequent service `queued` notification.
+ //
+ const tenant_service_build_built* tsb (nullptr);
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ tenant_service_build_queued::build_queued_hints qhs;
+
+ // Note that if the session authentication fails (probably due to the
+ // authentication settings change), then we log this case with the warning
+ // severity and respond with the 200 HTTP code as if the challenge is
+ // valid. The thinking is that we shouldn't alarm a law-abiding agent and
+ // shouldn't provide any information to a malicious one.
+ //
+ connection_ptr conn (build_db_->connection ());
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
package_build pb;
- shared_ptr<build> b;
+
+ auto build_timestamp = [&b] ()
+ {
+ return to_string (
+ chrono::duration_cast<std::chrono::nanoseconds> (
+ b->timestamp.time_since_epoch ()).count ());
+ };
+
if (!build_db_->query_one<package_build> (
query<package_build>::build::id == id, pb))
+ {
warn_expired ("no package build");
+ }
else if ((b = move (pb.build))->state != build_state::building)
- warn_expired ("package configuration state is " + to_string (b->state));
- else if (b->timestamp != session_timestamp)
- warn_expired ("non-matching timestamp");
- else
{
- // Check the challenge.
- //
- // If the challenge doesn't match expectations (probably due to the
- // authentication settings change), then we log this case with the
- // warning severity and respond with the 200 HTTP code as if the
- // challenge is valid. The thinking is that we shouldn't alarm a
- // law-abaiding agent and shouldn't provide any information to a
- // malicious one.
- //
- auto warn_auth = [&rqm, &warn] (const string& d)
+ warn_expired ("package configuration state is " + to_string (b->state) +
+ ", force state " + to_string (b->force) +
+ ", timestamp " + build_timestamp ());
+ }
+ else if (b->timestamp != session.timestamp)
+ {
+ warn_expired ("non-matching timestamp " + build_timestamp ());
+ }
+ else if (authenticate_session (*options_, rqm.challenge, *b, rqm.session))
+ {
+ const tenant_service_base* ts (nullptr);
+
+ shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant));
+
+ if (t->service)
{
- warn << "session '" << rqm.session << "' authentication failed: " << d;
- };
+ auto i (tenant_service_map_.find (t->service->type));
- bool auth (false);
+ if (i != tenant_service_map_.end ())
+ ts = i->second.get ();
+ }
- // Must both be present or absent.
+ // If the build is interrupted, then revert it to the original built
+ // state if this is a rebuild. Otherwise (initial build), turn the build
+ // into the queued state if the tenant_service_build_queued callback is
+ // registered for the package tenant and delete it from the database
+ // otherwise.
+ //
+ // Note that if the tenant_service_build_queued callback is registered,
+ // we always send the `queued` notification for the interrupted build,
+ // even when we reverse it to the original built state. We could also
+ // turn the build into the queued state in this case, but it feels that
+ // there is no harm in keeping the previous build information available
+ // for the user.
//
- if (!b->agent_challenge != !rqm.challenge)
- warn_auth (rqm.challenge
- ? "unexpected challenge"
- : "challenge is expected");
- else if (bot_agent_key_map_ == nullptr) // Authentication is disabled.
- auth = true;
- else if (!b->agent_challenge) // Authentication is recently enabled.
- warn_auth ("challenge is required now");
- else
+ if (rs == result_status::interrupt)
{
- assert (b->agent_fingerprint && rqm.challenge);
- auto i (bot_agent_key_map_->find (*b->agent_fingerprint));
-
- // The agent's key is recently replaced.
+ // Schedule the `queued` notification, if the
+ // tenant_service_build_queued callback is registered for the tenant.
//
- if (i == bot_agent_key_map_->end ())
- warn_auth ("agent's public key not found");
- else
+ tsq = dynamic_cast<const tenant_service_build_queued*> (ts);
+
+ if (b->status) // Is this a rebuild?
{
- try
- {
- openssl os (print_args,
- path ("-"), fdstream_mode::text, 2,
- process_env (options_->openssl (),
- options_->openssl_envvar ()),
- "rsautl",
- options_->openssl_option (),
- "-verify", "-pubin", "-inkey", i->second);
-
- for (const auto& c: *rqm.challenge)
- os.out.put (c); // Sets badbit on failure.
-
- os.out.close ();
-
- string s;
- getline (os.in, s);
-
- bool v (os.in.eof ());
- os.in.close ();
-
- if (os.wait () && v)
- {
- auth = s == *b->agent_challenge;
-
- if (!auth)
- warn_auth ("challenge mismatched");
- }
- else // The signature is presumably meaningless.
- warn_auth ("unable to verify challenge");
- }
- catch (const system_error& e)
+ b->state = build_state::built;
+
+ // Keep the force rebuild indication. Note that the forcing state is
+ // only valid for the building state.
+ //
+ if (b->force == force_state::forcing)
+ b->force = force_state::forced;
+
+ // Cleanup the interactive build login information.
+ //
+ b->interactive = nullopt;
+
+ // Cleanup the authentication data.
+ //
+ b->agent_fingerprint = nullopt;
+ b->agent_challenge = nullopt;
+
+ // Note that we are unable to restore the pre-rebuild timestamp
+ // since it has been overwritten when the build task was issued.
+ // That, however, feels ok and we just keep it unchanged.
+ //
+ // Moreover, we actually use the fact that the build's timestamp is
+ // greater than its soft_timestamp as an indication that the build
+ // object represents the interrupted rebuild (see the build_task
+ // handler for details).
+ //
+ // @@ Actually, we are also unable to restore the pre-rebuild machine
+ // and auxiliary machines, which are also displayed in the build
+ // log and may potentially be confusing. Should we drop them from
+ // the log in this case or replace with the "machine: unknown"
+ // record?
+
+ build_db_->update (b);
+ }
+ else // Initial build.
+ {
+ if (tsq != nullptr)
{
- fail << "unable to verify challenge: " << e;
+ // Since this is not a rebuild, there are no operation results and
+ // thus we don't need to load the results section to erase results
+ // from the database.
+ //
+ assert (b->results.empty ());
+
+ *b = build (move (b->tenant),
+ move (b->package_name),
+ move (b->package_version),
+ move (b->target),
+ move (b->target_config_name),
+ move (b->package_config_name),
+ move (b->toolchain_name),
+ move (b->toolchain_version));
+
+ build_db_->update (b);
}
+ else
+ build_db_->erase (b);
}
- }
- if (auth)
+ // If we ought to call the tenant_service_build_queued::build_queued()
+ // callback, then also set the package tenant's queued timestamp to
+ // the current time to prevent the notifications race (see
+ // tenant::queued_timestamp for details).
+ //
+ if (tsq != nullptr)
+ {
+ // Calculate the tenant service hints.
+ //
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == t->id));
+
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ qhs = tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p->configs.size () == 1};
+
+ // Set the package tenant's queued timestamp.
+ //
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+ else // Regular or skip build result.
{
- unforced = b->force == force_state::unforced;
+ // Schedule the `built` notification, if the
+ // tenant_service_build_built callback is registered for the tenant.
+ //
+ tsb = dynamic_cast<const tenant_service_build_built*> (ts);
- // Don't send email to the build-email address for the
- // success-to-success status change, unless the build was forced.
+ // Verify the result status/checksums.
//
- build_notify = !(rqm.result.status == result_status::success &&
- b->status &&
- *b->status == rqm.result.status &&
- unforced);
+ // Specifically, if the result status is skip, then it can only be in
+ // response to the soft rebuild task (all checksums are present in the
+ // build object) and the result checksums must match the build object
+ // checksums. On verification failure respond with the bad request
+ // HTTP code (400).
+ //
+ if (rs == result_status::skip)
+ {
+ if (!b->agent_checksum ||
+ !b->worker_checksum ||
+ !b->dependency_checksum)
+ throw invalid_request (400, "unexpected skip result status");
+
+ // Can only be absent for initial build, in which case the
+ // checksums are also absent and we would end up with the above
+ // 400 response.
+ //
+ assert (b->status);
+
+ // Verify that the result checksum matches the build checksum and
+ // throw invalid_request(400) if that's not the case.
+ //
+ auto verify = [] (const string& build_checksum,
+ const optional<string>& result_checksum,
+ const char* what)
+ {
+ if (!result_checksum)
+ throw invalid_request (
+ 400,
+ string (what) + " checksum is expected for skip result status");
+
+ if (*result_checksum != build_checksum)
+ throw invalid_request (
+ 400,
+ string (what) + " checksum '" + build_checksum +
+ "' is expected instead of '" + *result_checksum +
+ "' for skip result status");
+ };
+
+ verify (*b->agent_checksum, rqm.agent_checksum, "agent");
+
+ verify (*b->worker_checksum,
+ rqm.result.worker_checksum,
+ "worker");
+
+ verify (*b->dependency_checksum,
+ rqm.result.dependency_checksum,
+ "dependency");
+ }
- prev_status = move (b->status);
+ unforced = (b->force == force_state::unforced);
+
+ build_notify = !(rs == result_status::success &&
+ b->status &&
+ *b->status == rs &&
+ unforced);
b->state = build_state::built;
- b->status = rqm.result.status;
b->force = force_state::unforced;
+ // Cleanup the interactive build login information.
+ //
+ b->interactive = nullopt;
+
// Cleanup the authentication data.
//
b->agent_fingerprint = nullopt;
b->agent_challenge = nullopt;
- // Mark the section as loaded, so results are updated.
- //
- b->results_section.load ();
- b->results = move (rqm.result.results);
-
b->timestamp = system_clock::now ();
+ b->soft_timestamp = b->timestamp;
+
+ // If the result status is other than skip, then save the status,
+ // results, and checksums and update the hard timestamp. Also stash
+ // the service notification information, if present.
+ //
+ if (rs != result_status::skip)
+ {
+ b->status = rs;
+ b->hard_timestamp = b->soft_timestamp;
+
+ // Mark the section as loaded, so results are updated.
+ //
+ b->results_section.load ();
+ b->results = move (rqm.result.results);
+
+ // Save the checksums.
+ //
+ b->agent_checksum = move (rqm.agent_checksum);
+ b->worker_checksum = move (rqm.result.worker_checksum);
+ b->dependency_checksum = move (rqm.result.dependency_checksum);
+ }
build_db_->update (b);
- shared_ptr<build_package> p (
- build_db_->load<build_package> (b->id.package));
+ pkg = build_db_->load<build_package> (b->id.package);
+ cfg = find (b->package_config_name, pkg->configs);
- if (belongs (*cfg, "all") &&
- !exclude (p->builds, p->constraints, *cfg))
- bld = move (b);
+ // The package configuration should be present (see mod-builds.cxx for
+ // details) but if it is not, let's log the warning.
+ //
+ if (cfg != nullptr)
+ {
+ // Don't send the build notification email if the task result is
+ // `skip`, the configuration is hidden, or is now excluded by the
+ // package.
+ //
+ if (rs != result_status::skip && !belongs (*tc, "hidden"))
+ {
+ build_db_->load (*pkg, pkg->constraints_section);
+
+ if (!exclude (*cfg, pkg->builds, pkg->constraints, *tc))
+ bld = b;
+ }
+ }
+ else
+ warn << "cannot find configuration '" << b->package_config_name
+ << "' for package " << pkg->id.name << '/' << pkg->version;
}
+
+ // If required, stash the service notification information.
+ //
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (move (*t->service), move (b));
}
t.commit ();
}
- if (bld == nullptr)
- return true;
-
- string subj ((unforced ? "build " : "rebuild ") +
- to_string (*bld->status) + ": " +
- bld->package_name.string () + '/' +
- bld->package_version.string () + '/' +
- bld->configuration + '/' +
- bld->toolchain_name + '-' + bld->toolchain_version.string ());
+ // We either notify about the queued build or notify about the built package
+ // or don't notify at all.
+ //
+ assert (tsb == nullptr || tsq == nullptr);
- // Send notification emails to the interested parties.
+ // If the package build is interrupted and the tenant-associated third-party
+ // service needs to be notified about the queued builds, then call the
+ // tenant_service_build_queued::build_queued() callback function and update
+ // the service state, if requested.
//
- auto send_email = [&bld, &subj, &error, &trace, &print_args, this]
- (const string& to)
+ if (tsq != nullptr)
{
- try
- {
- l2 ([&]{trace << "email '" << subj << "' to " << to;});
-
- // Redirect the diagnostics to webserver error log.
- //
- // Note: if using this somewhere else, then need to factor out all this
- // exit status handling code.
- //
- sendmail sm (print_args,
- 2,
- options_->email (),
- subj,
- {to});
-
- if (bld->results.empty ())
- sm.out << "No operation results available." << endl;
- else
- {
- const string& host (options_->host ());
- const dir_path& root (options_->root ());
-
- ostream& os (sm.out);
-
- assert (bld->status);
- os << "combined: " << *bld->status << endl << endl
- << " " << build_log_url (host, root, *bld) << endl << endl;
+ assert (tss); // Wouldn't be here otherwise.
- for (const auto& r: bld->results)
- os << r.operation << ": " << r.status << endl << endl
- << " " << build_log_url (host, root, *bld, &r.operation)
- << endl << endl;
-
- os << "Force rebuild (enter the reason, use '+' instead of spaces):"
- << endl << endl
- << " " << build_force_url (host, root, *bld) << endl;
- }
+ const tenant_service& ss (tss->first);
- sm.out.close ();
+ vector<build> qbs;
+ qbs.push_back (move (*tss->second));
- if (!sm.wait ())
- error << "sendmail " << *sm.exit;
- }
- // Handle process_error and io_error (both derive from system_error).
- //
- catch (const system_error& e)
- {
- error << "sendmail error: " << e;
- }
- };
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ build_state::building,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- // Don't send the build notification email if the empty package build email
- // is specified.
+ // If a third-party service needs to be notified about the built package,
+ // then call the tenant_service_build_built::build_built() callback function
+ // and update the service state, if requested.
//
- optional<email>& build_email (pkg->build_email);
- if (build_notify && (!build_email || !build_email->empty ()))
+ if (tsb != nullptr)
{
- // If none of the package build-* addresses is specified, then the build
- // email address is assumed to be the same as the package email address,
- // if specified, otherwise as the project email address, if specified,
- // otherwise the notification email is not sent.
- //
- optional<email> to;
+ assert (tss); // Wouldn't be here otherwise.
- if (build_email)
- to = move (build_email);
- else if (!pkg->build_warning_email && !pkg->build_error_email)
- to = move (pkg->package_email ? pkg->package_email : pkg->email);
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
- if (to)
- send_email (*to);
+ if (auto f = tsb->build_built (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
}
- assert (bld->status);
-
- // Send the build warning/error notification emails, if requested.
- //
- if (pkg->build_warning_email && *bld->status >= result_status::warning)
- send_email (*pkg->build_warning_email);
-
- if (pkg->build_error_email && *bld->status >= result_status::error)
- send_email (*pkg->build_error_email);
+ if (bld != nullptr)
+ {
+ // Don't send the notification email for success-to-success status change,
+ // etc.
+ //
+ if (!build_notify)
+ (cfg->email ? cfg->email : pkg->build_email) = email ();
+
+ send_notification_email (*options_,
+ conn,
+ *bld,
+ *pkg,
+ *cfg,
+ unforced ? "build" : "rebuild",
+ error,
+ verb_ >= 2 ? &trace : nullptr);
+ }
return true;
}
diff --git a/mod/mod-build-result.hxx b/mod/mod-build-result.hxx
index f65dc08..96449d5 100644
--- a/mod/mod-build-result.hxx
+++ b/mod/mod-build-result.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-result.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILD_RESULT_HXX
@@ -8,22 +7,22 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
-#include <mod/database-module.hxx>
-#include <mod/build-config-module.hxx>
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
+#include <mod/build-result-module.hxx>
namespace brep
{
- class build_result: public database_module, private build_config_module
+ class build_result: public build_result_module
{
public:
- build_result () = default;
+ explicit
+ build_result (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_result (const build_result&);
+ build_result (const build_result&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -37,6 +36,7 @@ namespace brep
private:
shared_ptr<options::build_result> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 9f97098..e0aad4b 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -1,36 +1,40 @@
// file : mod/mod-build-task.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-build-task.hxx>
#include <map>
+#include <regex>
#include <chrono>
+#include <random>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
#include <odb/schema-catalog.hxx>
-#include <libbutl/sha256.mxx>
-#include <libbutl/utility.mxx> // compare_c_string
-#include <libbutl/openssl.mxx>
-#include <libbutl/fdstream.mxx> // nullfd
-#include <libbutl/process-io.mxx>
-#include <libbutl/path-pattern.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/ft/lang.hxx> // thread_local
+
+#include <libbutl/regex.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx> // nullfd
+#include <libbutl/process-io.hxx>
+#include <libbutl/path-pattern.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <libbbot/manifest.hxx>
-#include <libbbot/build-config.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
#include <libbrep/build-package.hxx>
#include <libbrep/build-package-odb.hxx>
-#include <mod/options.hxx>
+#include <mod/build.hxx> // send_notification_email()
+#include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>
using namespace std;
using namespace butl;
@@ -38,15 +42,40 @@ using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+static thread_local mt19937 rand_gen (random_device {} ());
+
+// Generate a random number in the specified range (max value is included).
+//
+static inline size_t
+rand (size_t min_val, size_t max_val)
+{
+ // Note that size_t is not whitelisted as a type the
+ // uniform_int_distribution class template can be instantiated with.
+ //
+ return min_val == max_val
+ ? min_val
+ : static_cast<size_t> (
+ uniform_int_distribution<unsigned long long> (
+ static_cast<unsigned long long> (min_val),
+ static_cast<unsigned long long> (max_val)) (rand_gen));
+}
+
+brep::build_task::
+build_task (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_task::
-build_task (const build_task& r)
+build_task (const build_task& r, const tenant_service_map& tsm)
: database_module (r),
build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -60,6 +89,24 @@ init (scanner& s)
if (options_->build_config_specified ())
{
+ // Verify that build-alt-*-rebuild-{start,stop} are both either specified
+ // or not.
+ //
+ auto bad_alt = [&fail] (const char* what)
+ {
+ fail << "build-alt-" << what << "-rebuild-start and build-alt-" << what
+ << "-rebuild-stop configuration options must both be either "
+ << "specified or not";
+ };
+
+ if (options_->build_alt_soft_rebuild_start_specified () !=
+ options_->build_alt_soft_rebuild_stop_specified ())
+ bad_alt ("soft");
+
+ if (options_->build_alt_hard_rebuild_start_specified () !=
+ options_->build_alt_hard_rebuild_stop_specified ())
+ bad_alt ("hard");
+
database_module::init (*options_, options_->build_db_retry ());
// Check that the database 'build' schema matches the current one. It's
@@ -79,6 +126,84 @@ init (scanner& s)
options_->root (dir_path ("/"));
}
+// Skip tenants with the freshly queued packages from the consideration (see
+// tenant::queued_timestamp for the details on the service notifications race
+// prevention).
+//
+template <typename T>
+static inline query<T>
+package_query (bool custom_bot,
+ brep::params::build_task& params,
+ interactive_mode imode,
+ uint64_t queued_expiration_ns)
+{
+ using namespace brep;
+ using query = query<T>;
+
+ query q (!query::build_tenant::archived);
+
+ if (custom_bot)
+ {
+ // Note that we could potentially only query the packages which refer to
+ // this custom bot key in one of their build configurations. For that we
+ // would need to additionally join the current query tables with the bot
+ // fingerprint-containing build_package_bot_keys and
+ // build_package_config_bot_keys tables and use the SELECT DISTINCT
+ // clause. The problem is that we also use the ORDER BY clause and in this
+ // case PostgreSQL requires all the ORDER BY clause expressions to also be
+ // present in the SELECT DISTINCT clause and fails with the 'for SELECT
+ // DISTINCT, ORDER BY expressions must appear in select list' error if
+ // that's not the case. Also note that in the ODB-generated code the
+ // 'build_package.project::TEXT' expression in the SELECT DISTINCT clause
+ // (see the CITEXT type mapping for details in libbrep/common.hxx) would
+ // not match the 'build_package.name' expression in the ORDER BY clause
+ // and so we will end up with the mentioned error. One (hackish) way to
+ // fix that would be to add a dummy member of the string type for the
+ // build_package.name column. This all sounds quite hairy at the moment
+ // and it also feels that this can potentially pessimize querying the
+ // packages built with the default bots only. Thus let's keep it simple
+ // for now and filter packages by the bot fingerprint at the program
+ // level.
+ //
+ q = q && (query::build_package::custom_bot.is_null () ||
+ query::build_package::custom_bot);
+ }
+ else
+ q = q && (query::build_package::custom_bot.is_null () ||
+ !query::build_package::custom_bot);
+
+ // Filter by repositories canonical names (if requested).
+ //
+ const strings& rp (params.repository ());
+
+ if (!rp.empty ())
+ q = q &&
+ query::build_repository::id.canonical_name.in_range (rp.begin (),
+ rp.end ());
+
+ // If the interactive mode is false or true, then filter out the respective
+ // packages.
+ //
+ switch (imode)
+ {
+ case interactive_mode::false_:
+ {
+ q = q && query::build_tenant::interactive.is_null ();
+ break;
+ }
+ case interactive_mode::true_:
+ {
+ q = q && query::build_tenant::interactive.is_not_null ();
+ break;
+ }
+ case interactive_mode::both: break;
+ }
+
+ return q &&
+ (query::build_tenant::queued_timestamp.is_null () ||
+ query::build_tenant::queued_timestamp < queued_expiration_ns);
+}
+
bool brep::build_task::
handle (request& rq, response& rs)
{
@@ -119,122 +244,263 @@ handle (request& rq, response& rs)
throw invalid_request (400, e.what ());
}
- // Obtain the agent's public key fingerprint if requested. If the fingerprint
- // is requested but is not present in the request or is unknown, then respond
- // with 401 HTTP code (unauthorized).
+ // Obtain the agent's public key fingerprint if requested. If the
+ // fingerprint is requested but is not present in the request, then respond
+ // with 401 HTTP code (unauthorized). If a key with the specified
+ // fingerprint is not present in the build bot agent keys directory, then
+ // assume that this is a custom build bot.
+ //
+ // Note that if the agent authentication is not configured (the agent keys
+ // directory is not specified), then the bot can never be custom and its
+ // fingerprint is ignored, if present.
//
optional<string> agent_fp;
+ bool custom_bot (false);
if (bot_agent_key_map_ != nullptr)
{
- if (!tqm.fingerprint ||
- bot_agent_key_map_->find (*tqm.fingerprint) ==
- bot_agent_key_map_->end ())
+ if (!tqm.fingerprint)
throw invalid_request (401, "unauthorized");
agent_fp = move (tqm.fingerprint);
+
+ custom_bot = (bot_agent_key_map_->find (*agent_fp) ==
+ bot_agent_key_map_->end ());
}
- task_response_manifest tsm;
+ // The resulting task manifest and the related build, package, and
+ // configuration objects. Note that the latter 3 are only meaningful if the
+ // task manifest is present.
+ //
+ task_response_manifest task_response;
+ shared_ptr<build> task_build;
+ shared_ptr<build_package> task_package;
+ const build_package_config* task_config;
+
+ auto serialize_task_response_manifest = [&task_response, &rs] ()
+ {
+ // @@ Probably it would be a good idea to also send some cache control
+ // headers to avoid caching by HTTP proxies. That would require
+ // extension of the web::response interface.
+ //
+
+ manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
+ "task_response_manifest");
+ task_response.serialize (s);
+ };
+
+ interactive_mode imode (tqm.effective_interactive_mode ());
+
+ // Restrict the interactive mode (specified by the task request manifest) if
+ // the interactive parameter is specified and is other than "both". If
+ // values specified by the parameter and manifest are incompatible (false vs
+ // true), then just bail out responding with the manifest with an empty
+ // session.
+ //
+ if (params.interactive () != interactive_mode::both)
+ {
+ if (imode != interactive_mode::both)
+ {
+ if (params.interactive () != imode)
+ {
+ serialize_task_response_manifest ();
+ return true;
+ }
+ }
+ else
+ imode = params.interactive (); // Can only change both to true or false.
+ }
- // Map build configurations to machines that are capable of building them.
- // The first matching machine is selected for each configuration. Also
- // create the configuration name list for use in database queries.
+ // Map build target configurations to machines that are capable of building
+ // them. The first matching machine is selected for each configuration.
//
struct config_machine
{
- const build_config* config;
+ const build_target_config* config;
machine_header_manifest* machine;
};
- using config_machines = map<const char*, config_machine, compare_c_string>;
+ using config_machines = map<build_target_config_id, config_machine>;
- cstrings cfg_names;
- config_machines cfg_machines;
+ config_machines conf_machines;
- for (const auto& c: *build_conf_)
+ for (const build_target_config& c: *target_conf_)
{
- for (auto& m: tqm.machines)
+ for (machine_header_manifest& m: tqm.machines)
{
- // The same story as in exclude() from build-config.cxx.
- //
+ if (m.effective_role () == machine_role::build)
try
{
+ // The same story as in exclude() from build-target-config.cxx.
+ //
if (path_match (dash_components_to_path (m.name),
dash_components_to_path (c.machine_pattern),
dir_path () /* start */,
- path_match_flags::match_absent) &&
- cfg_machines.insert (
- make_pair (c.name.c_str (), config_machine ({&c, &m}))).second)
- cfg_names.push_back (c.name.c_str ());
+ path_match_flags::match_absent))
+ {
+ conf_machines.emplace (build_target_config_id {c.target, c.name},
+ config_machine {&c, &m});
+ break;
+ }
}
catch (const invalid_path&) {}
}
}
- // Go through packages until we find one that has no build configuration
- // present in the database, or is in the building state but expired
- // (collectively called unbuilt). If such a package configuration is found
- // then put it into the building state, set the current timestamp and respond
- // with the task for building this package configuration.
+ // Collect the auxiliary configurations/machines available for the build.
+ //
+ struct auxiliary_config_machine
+ {
+ string config;
+ const machine_header_manifest* machine;
+ };
+
+ vector<auxiliary_config_machine> auxiliary_config_machines;
+
+ for (const machine_header_manifest& m: tqm.machines)
+ {
+ if (m.effective_role () == machine_role::auxiliary)
+ {
+ // Derive the auxiliary configuration name by stripping the first
+ // (architecture) component from the machine name.
+ //
+ size_t p (m.name.find ('-'));
+
+ if (p == string::npos || p == 0 || p == m.name.size () - 1)
+ throw invalid_request (400,
+ (string ("no ") +
+ (p == 0 ? "architecture" : "OS") +
+ " component in machine name '" + m.name + "'"));
+
+ auxiliary_config_machines.push_back (
+ auxiliary_config_machine {string (m.name, p + 1), &m});
+ }
+ }
+
+ // Go through package build configurations until we find one that has no
+ // build target configuration present in the database, or is in the building
+ // state but expired (collectively called unbuilt). If such a target
+ // configuration is found then put it into the building state, set the
+ // current timestamp and respond with the task for building this package
+ // configuration.
//
// While trying to find a non-built package configuration we will also
- // collect the list of the built package configurations which it's time to
- // rebuild. So if no unbuilt package is found, we will pickup one to
- // rebuild. The rebuild preference is given in the following order: the
- // greater force state, the greater overall status, the lower timestamp.
+ // collect the list of the built configurations which it's time to
+ // rebuild. So if no unbuilt package configuration is found, we will pickup
+ // one to rebuild. The rebuild preference is given in the following order:
+ // the greater force state, the greater overall status, the lower timestamp.
//
- if (!cfg_machines.empty ())
+ if (!conf_machines.empty ())
{
vector<shared_ptr<build>> rebuilds;
- // Create the task response manifest. The package must have the internal
- // repository loaded.
+ // Create the task response manifest. Must be called inside the build db
+ // transaction.
//
- auto task = [this] (shared_ptr<build>&& b,
- shared_ptr<build_package>&& p,
+ auto task = [this] (const build& b,
+ const build_package& p,
+ const build_package_config& pc,
+ small_vector<bpkg::test_dependency, 1>&& tests,
+ vector<auxiliary_machine>&& ams,
+ optional<string>&& interactive,
const config_machine& cm) -> task_response_manifest
{
uint64_t ts (
chrono::duration_cast<std::chrono::nanoseconds> (
- b->timestamp.time_since_epoch ()).count ());
-
- string session (b->tenant + '/' +
- b->package_name.string () + '/' +
- b->package_version.string () + '/' +
- b->configuration + '/' +
- b->toolchain_name + '/' +
- b->toolchain_version.string () + '/' +
+ b.timestamp.time_since_epoch ()).count ());
+
+ string session (b.tenant + '/' +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string () + '/' +
to_string (ts));
- string result_url (options_->host () +
- tenant_dir (options_->root (), b->tenant).string () +
- "?build-result");
+ string tenant (tenant_dir (options_->root (), b.tenant).string ());
+ string result_url (options_->host () + tenant + "?build-result");
+
+ assert (transaction::has_current ());
+
+ assert (p.internal ()); // The package is expected to be buildable.
- lazy_shared_ptr<build_repository> r (p->internal_repository);
+ shared_ptr<build_repository> r (p.internal_repository.load ());
- strings fp;
+ strings fps;
if (r->certificate_fingerprint)
- fp.emplace_back (move (*r->certificate_fingerprint));
+ fps.emplace_back (move (*r->certificate_fingerprint));
- task_manifest task (move (b->package_name),
- move (b->package_version),
+ const package_name& pn (p.id.name);
+
+ bool module_pkg (pn.string ().compare (0, 10, "libbuild2-") == 0);
+
+ // Note that the auxiliary environment is crafted by the bbot agent
+ // after the auxiliary machines are booted.
+ //
+ task_manifest task (pn,
+ p.version,
move (r->location),
- move (fp),
+ move (fps),
+ p.requirements,
+ move (tests),
+ b.dependency_checksum,
cm.machine->name,
+ move (ams),
cm.config->target,
cm.config->environment,
+ nullopt /* auxiliary_environment */,
cm.config->args,
- cm.config->warning_regexes);
+ pc.arguments,
+ belongs (*cm.config, module_pkg ? "build2" : "host"),
+ cm.config->warning_regexes,
+ move (interactive),
+ b.worker_checksum);
+
+ // Collect the build artifacts upload URLs, skipping those which are
+ // excluded with the upload-*-exclude configuration options.
+ //
+ vector<upload_url> upload_urls;
+
+ for (const auto& ud: options_->upload_data ())
+ {
+ const string& t (ud.first);
+
+ auto exclude = [&t] (const multimap<string, string>& mm,
+ const string& v)
+ {
+ auto range (mm.equal_range (t));
+
+ for (auto i (range.first); i != range.second; ++i)
+ {
+ if (i->second == v)
+ return true;
+ }
+
+ return false;
+ };
+
+ if (!exclude (options_->upload_toolchain_exclude (),
+ b.toolchain_name) &&
+ !exclude (options_->upload_repository_exclude (),
+ r->canonical_name))
+ {
+ upload_urls.emplace_back (options_->host () + tenant + "?upload=" + t,
+ t);
+ }
+ }
return task_response_manifest (move (session),
- move (b->agent_challenge),
+ b.agent_challenge,
move (result_url),
+ move (upload_urls),
+ b.agent_checksum,
move (task));
};
- // Calculate the build (building state) or rebuild (built state) expiration
- // time for package configurations
+ // Calculate the build/rebuild (building/built state) and the `queued`
+ // notifications expiration time for package configurations.
//
timestamp now (system_clock::now ());
@@ -255,12 +521,102 @@ handle (request& rq, response& rs)
uint64_t forced_result_expiration_ns (
expiration_ns (options_->build_forced_rebuild_timeout ()));
- timestamp normal_rebuild_expiration (
- expiration (options_->build_normal_rebuild_timeout ()));
-
timestamp forced_rebuild_expiration (
expiration (options_->build_forced_rebuild_timeout ()));
+ uint64_t queued_expiration_ns (
+ expiration_ns (options_->build_queued_timeout ()));
+
+ // Calculate the soft/hard rebuild expiration time, based on the
+ // respective build-{soft,hard}-rebuild-timeout and
+ // build-alt-{soft,hard}-rebuild-{start,stop,timeout} configuration
+ // options.
+ //
+ // If normal_timeout is zero, then return timestamp_unknown to indicate
+ // 'never expire'. Note that this value is less than any build timestamp
+ // value, including timestamp_nonexistent.
+ //
+ // NOTE: there is a similar code in monitor/monitor.cxx.
+ //
+ auto build_expiration = [&now] (
+ const optional<pair<duration, duration>>& alt_interval,
+ optional<size_t> alt_timeout,
+ size_t normal_timeout)
+ {
+ if (normal_timeout == 0)
+ return timestamp_unknown;
+
+ timestamp r;
+ chrono::seconds nt (normal_timeout);
+
+ if (alt_interval)
+ {
+ const duration& start (alt_interval->first);
+ const duration& stop (alt_interval->second);
+
+ duration dt (daytime (now));
+
+ // Note that if the stop time is less than the start time then the
+ // interval extends through the midnight.
+ //
+ bool use_alt_timeout (start <= stop
+ ? dt >= start && dt < stop
+ : dt >= start || dt < stop);
+
+ // If we are out of the alternative rebuild timeout interval, then fall
+ // back to using the normal rebuild timeout.
+ //
+ if (use_alt_timeout)
+ {
+ // Calculate the alternative timeout, unless it is specified
+ // explicitly.
+ //
+ duration t;
+
+ if (!alt_timeout)
+ {
+ t = start <= stop ? (stop - start) : ((24h - start) + stop);
+
+ // If the normal rebuild timeout is greater than 24 hours, then
+ // increase the default alternative timeout by (normal - 24h) (see
+ // build-alt-soft-rebuild-timeout configuration option for
+ // details).
+ //
+ if (nt > 24h)
+ t += nt - 24h;
+ }
+ else
+ t = chrono::seconds (*alt_timeout);
+
+ r = now - t;
+ }
+ }
+
+ return r != timestamp_nonexistent ? r : (now - nt);
+ };
+
+ timestamp soft_rebuild_expiration (
+ build_expiration (
+ (options_->build_alt_soft_rebuild_start_specified ()
+ ? make_pair (options_->build_alt_soft_rebuild_start (),
+ options_->build_alt_soft_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (options_->build_alt_soft_rebuild_timeout_specified ()
+ ? options_->build_alt_soft_rebuild_timeout ()
+ : optional<size_t> ()),
+ options_->build_soft_rebuild_timeout ()));
+
+ timestamp hard_rebuild_expiration (
+ build_expiration (
+ (options_->build_alt_hard_rebuild_start_specified ()
+ ? make_pair (options_->build_alt_hard_rebuild_start (),
+ options_->build_alt_hard_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (options_->build_alt_hard_rebuild_timeout_specified ()
+ ? options_->build_alt_hard_rebuild_timeout ()
+ : optional<size_t> ()),
+ options_->build_hard_rebuild_timeout ()));
+
// Return the challenge (nonce) if brep is configured to authenticate bbot
// agents. Return nullopt otherwise.
//
@@ -302,7 +658,7 @@ handle (request& rq, response& rs)
if (!os.wait () || nonce.size () != 64)
fail << "unable to generate nonce";
- uint64_t t (chrono::duration_cast<std::chrono::nanoseconds> (
+ uint64_t t (chrono::duration_cast<chrono::nanoseconds> (
now.time_since_epoch ()).count ());
sha256 cs (nonce.data (), nonce.size ());
@@ -321,6 +677,7 @@ handle (request& rq, response& rs)
// Convert butl::standard_version type to brep::version.
//
brep::version toolchain_version (tqm.toolchain_version.string ());
+ string& toolchain_name (tqm.toolchain_name);
// Prepare the buildable package prepared query.
//
@@ -341,364 +698,1707 @@ handle (request& rq, response& rs)
using pkg_query = query<buildable_package>;
using prep_pkg_query = prepared_query<buildable_package>;
- // Exclude archived tenants.
+ pkg_query pq (package_query<buildable_package> (custom_bot,
+ params,
+ imode,
+ queued_expiration_ns));
+
+ // Transform (in-place) the interactive login information into the actual
+ // login command, if specified in the manifest and the transformation
+ // regexes are specified in the configuration.
//
- pkg_query pq (!pkg_query::build_tenant::archived);
+ if (tqm.interactive_login &&
+ options_->build_interactive_login_specified ())
+ {
+ optional<string> lc;
+ string l (tqm.agent + ' ' + *tqm.interactive_login);
+
+ // Use the first matching regex for the transformation.
+ //
+ for (const pair<regex, string>& rf: options_->build_interactive_login ())
+ {
+ pair<string, bool> r (regex_replace_match (l, rf.first, rf.second));
+
+ if (r.second)
+ {
+ lc = move (r.first);
+ break;
+ }
+ }
- // Filter by repositories canonical names (if requested).
+ if (!lc)
+ throw invalid_request (400, "unable to match login info '" + l + '\'');
+
+ tqm.interactive_login = move (lc);
+ }
+
+ // In the random package ordering mode iterate over the packages list by
+ // starting from the random offset and wrapping around when reaching the
+ // end.
+ //
+ // Note, however, that since there can be some packages which are already
+ // built for all configurations and are not archived yet, picking an
+ // unbuilt package this way may not work as desired. Think of the
+ // following case with 5 packages in 3 non-archived tenants:
+ //
+ // 0: A - unbuilt, tenant 1
+ // 1: B - built, tenant 2
+ // 2: C - built, tenant 2
+ // 3: D - built, tenant 2
+ // 4: E - unbuilt, tenant 3
//
- const vector<string>& rp (params.repository ());
+ // If we just pick a random starting offset in the [0, 4] range, then we
+ // will build A package with probability 0.2 and E with probability 0.8.
+ //
+ // To fix that we will only try to build a package from a tenant that the
+ // random starting offset refers to. Failed that, we will randomly pick
+ // new starting offset and retry. To make sure we don't retry indefinitely
+ // when there are no more packages to build (and also for the sake of
+ // optimization; see below), we will track positions of packages which we
+ // (unsuccessfully) have already tried to build and skip them while
+ // generating the random starting offsets and while iterating over
+ // packages.
+ //
+ // Also note that since we iterate over packages in chunks, each queried
+ // in a separate transaction, the number of packages may potentially
+ // increase or decrease while iterating over them. Thus, to keep things
+ // consistent, we may need to update our tried positions tracking state
+ // accordingly (not to cycle, not to refer to an entry out of the list
+ // boundaries, etc). Generally, regardless whether the number of packages
+ // has changed or not, the offsets and position statuses may now refer to
+ // some different packages. The only sensible thing we can do in such
+ // cases (without trying to detect this situation and restart from
+ // scratch) is to serve the request and issue some build task, if
+ // possible.
+ //
+ bool random (options_->build_package_order () == build_order::random);
+ size_t start_offset (0);
- if (!rp.empty ())
- pq = pq &&
- pkg_query::build_repository::id.canonical_name.in_range (rp.begin (),
- rp.end ());
+ // List of "tried to build" package statuses. True entries denote
+ // positions of packages which we have tried to build. Initially all
+ // entries are false.
+ //
+ vector<bool> tried_positions;
- // Specify the portion.
+ // Number of false entries in the above vector. Used merely as an
+ // optimization to bail out.
//
- size_t offset (0);
+ size_t untried_positions_count (0);
- pq += "ORDER BY" +
- pkg_query::build_package::id.tenant + "," +
- pkg_query::build_package::id.name +
- order_by_version (pkg_query::build_package::id.version, false) +
- "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50";
+ // Return a random position of a package that we have not yet tried to
+ // build, if present, and nullopt otherwise.
+ //
+ auto rand_position = [&tried_positions,
+ &untried_positions_count] () -> optional<size_t>
+ {
+ assert (untried_positions_count <= tried_positions.size ());
- connection_ptr conn (build_db_->connection ());
+ if (untried_positions_count == 0)
+ return nullopt;
- prep_pkg_query pkg_prep_query (
- conn->prepare_query<buildable_package> (
- "mod-build-task-package-query", pq));
+ size_t r;
+ while (tried_positions[r = rand (0, tried_positions.size () - 1)]) ;
+ return r;
+ };
- // Prepare the build prepared query.
+ // Mark the package at specified position as tried to build. Assume that
+ // it has not yet been tried to build.
//
- // Note that we can not query the database for configurations that a
- // package was not built with, as the database contains only those package
- // configurations that have already been acted upon (initially empty).
+ auto position_tried = [&tried_positions,
+ &untried_positions_count] (size_t i)
+ {
+ assert (i < tried_positions.size () &&
+ !tried_positions[i] &&
+ untried_positions_count != 0);
+
+ tried_positions[i] = true;
+ --untried_positions_count;
+ };
+
+ // Resize the tried positions list and update the untried positions
+ // counter accordingly if the package number has changed.
//
- // This is why we query the database for package configurations that
- // should not be built (in the built state, or in the building state and
- // not expired). Having such a list we will select the first build
- // configuration that is not in the list (if available) for the response.
+ // For simplicity, assume that packages are added/removed to/from the end
+ // of the list. Note that misguessing in such rare cases is possible
+ // but not harmful (see above for the reasoning).
//
- using bld_query = query<build>;
- using prep_bld_query = prepared_query<build>;
+ auto resize_tried_positions = [&tried_positions, &untried_positions_count]
+ (size_t n)
+ {
+ if (n > tried_positions.size ()) // Packages added?
+ {
+ untried_positions_count += n - tried_positions.size ();
+ tried_positions.resize (n, false);
+ }
+ else if (n < tried_positions.size ()) // Packages removed?
+ {
+ for (size_t i (n); i != tried_positions.size (); ++i)
+ {
+ if (!tried_positions[i])
+ {
+ assert (untried_positions_count != 0);
+ --untried_positions_count;
+ }
+ }
- package_id id;
- const auto& qv (bld_query::id.package.version);
+ tried_positions.resize (n);
+ }
+ else
+ {
+ // Not supposed to be called if the number of packages didn't change.
+ //
+ assert (false);
+ }
+ };
- bld_query bq (
- bld_query::id.package.tenant == bld_query::_ref (id.tenant) &&
+ if (random)
+ {
+ using query = query<buildable_package_count>;
- bld_query::id.package.name == bld_query::_ref (id.name) &&
+ query q (package_query<buildable_package_count> (custom_bot,
+ params,
+ imode,
+ queued_expiration_ns));
- qv.epoch == bld_query::_ref (id.version.epoch) &&
- qv.canonical_upstream ==
- bld_query::_ref (id.version.canonical_upstream) &&
- qv.canonical_release ==
- bld_query::_ref (id.version.canonical_release) &&
- qv.revision == bld_query::_ref (id.version.revision) &&
+ transaction t (build_db_->begin ());
- bld_query::id.configuration.in_range (cfg_names.begin (),
- cfg_names.end ()) &&
+ // If there are any non-archived interactive build tenants, then the
+ // chosen randomization approach doesn't really work since interactive
+ // tenants must be preferred over non-interactive ones, which is
+ // achieved by proper ordering of the package query result (see below).
+ // Thus, we just disable randomization if there are any interactive
+ // tenants.
+ //
+ // But shouldn't we randomize the order between packages in multiple
+ // interactive tenants? Given that such a tenant may only contain a
+ // single package and can only be built in a single configuration that
+ // is probably not important. However, we may assume that the
+ // randomization still happens naturally due to the random nature of the
+ // tenant id, which is used as a primary sorting criteria (see below).
+ //
+ size_t interactive_package_count (
+ build_db_->query_value<buildable_package_count> (
+ q && query::build_tenant::interactive.is_not_null ()));
- bld_query::id.toolchain_name == tqm.toolchain_name &&
+ if (interactive_package_count == 0)
+ {
+ untried_positions_count =
+ build_db_->query_value<buildable_package_count> (q);
+ }
+ else
+ random = false;
- compare_version_eq (bld_query::id.toolchain_version,
- canonical_version (toolchain_version),
- true /* revision */) &&
+ t.commit ();
- (bld_query::state == "built" ||
- ((bld_query::force == "forcing" &&
- bld_query::timestamp > forced_result_expiration_ns) ||
- (bld_query::force != "forcing" && // Unforced or forced.
- bld_query::timestamp > normal_result_expiration_ns))));
+ if (untried_positions_count != 0)
+ {
+ tried_positions.resize (untried_positions_count, false);
- prep_bld_query bld_prep_query (
- conn->prepare_query<build> ("mod-build-task-build-query", bq));
+ optional<size_t> so (rand_position ());
+ assert (so); // Wouldn't be here otherwise.
+ start_offset = *so;
+ }
+ }
- while (tsm.session.empty ())
+ if (!random || !tried_positions.empty ())
{
- transaction t (conn->begin ());
+ // Specify the portion.
+ //
+ size_t offset (start_offset);
+ size_t limit (50);
- // Query (and cache) buildable packages.
+ pq += "ORDER BY";
+
+ // If the interactive mode is both, then order the packages so that ones
+ // from the interactive build tenants appear first.
//
- auto packages (pkg_prep_query.execute ());
+ if (imode == interactive_mode::both)
+ pq += pkg_query::build_tenant::interactive + "NULLS LAST,";
+
+ pq += pkg_query::build_package::id.tenant + "," +
+ pkg_query::build_package::id.name +
+ order_by_version (pkg_query::build_package::id.version, false) +
+ "OFFSET" + pkg_query::_ref (offset) +
+ "LIMIT" + pkg_query::_ref (limit);
+
+ connection_ptr conn (build_db_->connection ());
+
+ prep_pkg_query pkg_prep_query (
+ conn->prepare_query<buildable_package> (
+ "mod-build-task-package-query", pq));
- // Bail out if there is nothing left.
+ // Prepare the build prepared query.
+ //
+ // Note that we can not query the database for configurations that a
+ // package was not built with, as the database contains only those build
+ // configurations that have already been acted upon (initially empty).
+ //
+ // This is why we query the database for configurations that should not
+ // be built (in the built state, or in the building state and not
+ // expired). Having such a list we will select the first build
+ // configuration that is not in the list (if available) for the
+ // response.
+ //
+ using bld_query = query<build>;
+ using prep_bld_query = prepared_query<build>;
+
+ package_id id;
+ string pkg_config;
+
+ bld_query sq (false);
+ for (const auto& cm: conf_machines)
+ sq = sq || (bld_query::id.target == cm.first.target &&
+ bld_query::id.target_config_name == cm.first.config);
+
+ bld_query bq (
+ equal<build> (bld_query::id.package, id) &&
+ bld_query::id.package_config_name == bld_query::_ref (pkg_config) &&
+ sq &&
+ bld_query::id.toolchain_name == toolchain_name &&
+
+ compare_version_eq (bld_query::id.toolchain_version,
+ canonical_version (toolchain_version),
+ true /* revision */) &&
+
+ (bld_query::state == "built" ||
+ (bld_query::state == "building" &&
+ ((bld_query::force == "forcing" &&
+ bld_query::timestamp > forced_result_expiration_ns) ||
+ (bld_query::force != "forcing" && // Unforced or forced.
+ bld_query::timestamp > normal_result_expiration_ns)))));
+
+ prep_bld_query bld_prep_query (
+ conn->prepare_query<build> ("mod-build-task-build-query", bq));
+
+ // Return true if a package needs to be rebuilt.
//
- if (packages.empty ())
+ auto needs_rebuild = [&forced_rebuild_expiration,
+ &soft_rebuild_expiration,
+ &hard_rebuild_expiration] (const build& b)
{
- t.commit ();
- break;
- }
+ assert (b.state == build_state::built);
+
+ return (b.force == force_state::forced &&
+ b.timestamp <= forced_rebuild_expiration) ||
+ b.soft_timestamp <= soft_rebuild_expiration ||
+ b.hard_timestamp <= hard_rebuild_expiration;
+ };
+
+ // Convert a build to the hard rebuild, resetting the agent checksum.
+ //
+ // Note that since the checksums are hierarchical, the agent checksum
+ // reset will trigger resets of the "subordinate" checksums up to the
+ // dependency checksum and so the package will be rebuilt.
+ //
+ // Also note that we keep the previous build task result and status
+ // intact since we may still need to revert the build into the built
+ // state if the task execution is interrupted.
+ //
+ auto convert_to_hard = [] (const shared_ptr<build>& b)
+ {
+ b->agent_checksum = nullopt;
+ };
+
+ // Return SHA256 checksum of the controller logic and the configuration
+ // target, environment, arguments, and warning-detecting regular
+ // expressions.
+ //
+ auto controller_checksum = [] (const build_target_config& c)
+ {
+ sha256 cs ("1"); // Hash the logic version.
+
+ cs.append (c.target.string ());
+ cs.append (c.environment ? *c.environment : "");
+
+ for (const string& a: c.args)
+ cs.append (a);
- offset += packages.size ();
+ for (const string& re: c.warning_regexes)
+ cs.append (re);
+
+ return string (cs.string ());
+ };
+
+ // Return the machine id as a machine checksum.
+ //
+ // Note that we don't include auxiliary machine ids into this checksum
+ // since a different machine will most likely get picked for a pattern.
+ // And we view all auxiliary machines that match a pattern as equal for
+ // testing purposes (in other words, pattern is not the way to get
+ // coverage).
+ //
+ auto machine_checksum = [] (const machine_header_manifest& m)
+ {
+ return m.id;
+ };
- // Iterate over packages until we find one that needs building.
+ // Tenant that the start offset refers to.
//
- for (auto& bp: packages)
+ optional<string> start_tenant;
+
+ // If the build task is created and the tenant of the being built
+ // package has a third-party service state associated with it, then
+ // check if the tenant_service_build_building and/or
+ // tenant_service_build_queued callbacks are registered for the type of
+ // the associated service. If they are, then stash the state, the build
+ // object, and the callback pointers for the subsequent service
+ // notifications.
+ //
+ // Also, if the tenant_service_build_queued callback is registered, then
+ // create, persist, and stash the queued build objects for all the
+ // unbuilt by the current toolchain and not yet queued configurations of
+ // the package the build task is created for and calculate the hints.
+ // Note that for the task build, we need to make sure that the
+ // third-party service receives the `queued` notification prior to the
+ // `building` notification (see mod/tenant-service.hxx for valid
+ // transitions). The `queued` notification is assumed to be already sent
+ // for the build if the respective object exists and any of the
+ // following is true for it:
+ //
+ // - It is in the queued state (initial_state is build_state::queued).
+ //
+ // - It is a user-forced rebuild of an incomplete build
+ // (rebuild_forced_build is true).
+ //
+ // - It is a rebuild of an interrupted rebuild (rebuild_forced_build is
+ // true).
+ //
+ const tenant_service_build_building* tsb (nullptr);
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ vector<build> qbs;
+ tenant_service_build_queued::build_queued_hints qhs;
+ optional<build_state> initial_state;
+ bool rebuild_forced_build (false);
+ bool rebuild_interrupted_rebuild (false);
+
+ // Create, persist, and return the queued build objects for all the
+ // unbuilt by the current toolchain and not yet queued configurations of
+ // the specified package.
+ //
+ // Note that the build object argument is only used for the toolchain
+ // information retrieval. Also note that the package constraints section
+ // is expected to be loaded.
+ //
+ auto queue_builds = [this] (const build_package& p, const build& b)
{
- id = move (bp.id);
+ assert (p.constraints_section.loaded ());
- // Iterate through the package configurations and erase those that
- // don't need building from the build configuration map. All those
- // configurations that remained can be built. We will take the first
- // one, if present.
+ // Query the existing build ids and stash them into the set.
//
- // Also save the built package configurations for which it's time to be
- // rebuilt.
+ set<build_id> existing_builds;
+
+ using query = query<package_build_id>;
+
+ query q (query::build::id.package == p.id &&
+ query::build::id.toolchain_name == b.toolchain_name &&
+ compare_version_eq (query::build::id.toolchain_version,
+ b.id.toolchain_version,
+ true /* revision */));
+
+ for (build_id& id: build_db_->query<package_build_id> (q))
+ existing_builds.emplace (move (id));
+
+ // Go through all the potential package builds and queue those which
+ // are not in the existing builds set.
//
- config_machines configs (cfg_machines); // Make a copy for this pkg.
- auto pkg_builds (bld_prep_query.execute ());
+ vector<build> r;
- for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
+ for (const build_package_config& pc: p.configs)
{
- auto j (configs.find (i->id.configuration.c_str ()));
+ for (const build_target_config& tc: *target_conf_)
+ {
+ if (!exclude (pc, p.builds, p.constraints, tc))
+ {
+ build_id id (p.id,
+ tc.target, tc.name,
+ pc.name,
+ b.toolchain_name, b.toolchain_version);
+
+ if (existing_builds.find (id) == existing_builds.end ())
+ {
+ r.emplace_back (move (id.package.tenant),
+ move (id.package.name),
+ p.version,
+ move (id.target),
+ move (id.target_config_name),
+ move (id.package_config_name),
+ move (id.toolchain_name),
+ b.toolchain_version);
+
+ // @@ TODO Persist the whole vector of builds with a single
+ // operation if/when bulk operations support is added
+ // for objects with containers.
+ //
+ build_db_->persist (r.back ());
+ }
+ }
+ }
+ }
+
+ return r;
+ };
+
+ auto queue_hints = [this] (const build_package& p)
+ {
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == p.id.tenant));
+
+ return tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p.configs.size () == 1};
+ };
- // Outdated configurations are already excluded with the database
- // query.
+ // Collect the auxiliary machines required for testing of the specified
+ // package configuration and the external test packages, if present for
+ // the specified target configuration (task_auxiliary_machines),
+ // together with the auxiliary machines information that needs to be
+ // persisted in the database as a part of the build object
+ // (build_auxiliary_machines, which is parallel to
+ // task_auxiliary_machines). While at it collect the involved test
+ // dependencies. Return nullopt if any auxiliary configuration patterns
+ // may not be resolved to the auxiliary machines (no matching
+ // configuration, auxiliary machines RAM limit is exceeded, etc).
+ //
+ // Note that if the same auxiliary environment name is used for multiple
+ // packages (for example, for the main and tests packages or for the
+ // tests and examples packages, etc), then a shared auxiliary machine is
+ // used for all these packages. In this case all the respective
+ // configuration patterns must match the configuration derived from this
+ // machine name. If they don't, then return nullopt. The thinking here
+ // is that on the next task request a machine whose derived
+ // configuration matches all the patterns can potentially be picked.
+ //
+ struct collect_auxiliaries_result
+ {
+ vector<auxiliary_machine> task_auxiliary_machines;
+ vector<build_machine> build_auxiliary_machines;
+ small_vector<bpkg::test_dependency, 1> tests;
+ };
+
+ auto collect_auxiliaries = [&tqm, &auxiliary_config_machines, this]
+ (const shared_ptr<build_package>& p,
+ const build_package_config& pc,
+ const build_target_config& tc)
+ -> optional<collect_auxiliaries_result>
+ {
+ // The list of the picked build auxiliary machines together with the
+ // environment names they have been picked for.
+ //
+ vector<pair<auxiliary_config_machine, string>> picked_machines;
+
+ // Try to randomly pick the auxiliary machine that matches the
+ // specified pattern and which can be supplied with the minimum
+ // required RAM, if specified. Return false if such a machine is not
+ // available. If a machine is already picked for the specified
+ // environment name, then return true if the machine's configuration
+ // matches the specified pattern and false otherwise.
+ //
+ auto pick_machine =
+ [&tqm,
+ &picked_machines,
+ used_ram = uint64_t (0),
+ available_machines = auxiliary_config_machines]
+ (const build_auxiliary& ba) mutable -> bool
+ {
+ vector<size_t> ams; // Indexes of the available matching machines.
+ optional<uint64_t> ar (tqm.auxiliary_ram);
+
+ // If the machine configuration name pattern (which is legal) or any
+ // of the machine configuration names (illegal) are invalid paths,
+ // then we assume we cannot pick the machine.
+ //
+ try
+ {
+ // The same story as in exclude() from build-target-config.cxx.
+ //
+ auto match = [pattern = dash_components_to_path (ba.config)]
+ (const string& config)
+ {
+ return path_match (dash_components_to_path (config),
+ pattern,
+ dir_path () /* start */,
+ path_match_flags::match_absent);
+ };
+
+ // Check if a machine is already picked for the specified
+ // environment name.
+ //
+ for (const auto& m: picked_machines)
+ {
+ if (m.second == ba.environment_name)
+ return match (m.first.config);
+ }
+
+ // Collect the matching machines from the list of the available
+ // machines and bail out if there are none.
+ //
+ for (size_t i (0); i != available_machines.size (); ++i)
+ {
+ const auxiliary_config_machine& m (available_machines[i]);
+ optional<uint64_t> mr (m.machine->ram_minimum);
+
+ if (match (m.config) && (!mr || !ar || used_ram + *mr <= *ar))
+ ams.push_back (i);
+ }
+
+ if (ams.empty ())
+ return false;
+ }
+ catch (const invalid_path&)
+ {
+ return false;
+ }
+
+ // Pick the matching machine randomly.
//
- assert (j != configs.end ());
- configs.erase (j);
+ size_t i (ams[rand (0, ams.size () - 1)]);
+ auxiliary_config_machine& cm (available_machines[i]);
+
+ // Bump the used RAM.
+ //
+ if (optional<uint64_t> r = cm.machine->ram_minimum)
+ used_ram += *r;
+
+ // Move out the picked machine from the available machines list.
+ //
+ picked_machines.emplace_back (move (cm), ba.environment_name);
+ available_machines.erase (available_machines.begin () + i);
+ return true;
+ };
+
+ // Collect auxiliary machines for the main package build configuration.
+ //
+ for (const build_auxiliary& ba:
+ pc.effective_auxiliaries (p->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
+
+ // Collect the test packages and the auxiliary machines for their
+ // default build configurations. Exclude external test packages which
+ // exclude the current target configuration.
+ //
+ small_vector<bpkg::test_dependency, 1> tests;
+
+ if (!p->requirements_tests_section.loaded ())
+ build_db_->load (*p, p->requirements_tests_section);
- if (i->state == build_state::built)
+ for (const build_test_dependency& td: p->tests)
+ {
+ // Don't exclude unresolved external tests.
+ //
+ // Note that this may result in the build task failure. However,
+ // silently excluding such tests could end up with missed software
+ // bugs which feels much worse.
+ //
+ if (td.package != nullptr)
{
- assert (i->force != force_state::forcing);
+ shared_ptr<build_package> tp (td.package.load ());
+
+ // Try to use the test package configuration named the same as the
+ // current configuration of the main package. If there is no such
+ // a configuration, then fallback to using the default
+ // configuration (which must exist). If the selected test package
+ // configuration excludes the current target configuration, then
+ // exclude this external test package from the build task.
+ //
+ // Note that potentially the selected test package configuration
+ // may contain some (bpkg) arguments associated, but we currently
+ // don't provide build bot worker with such information. This,
+ // however, is probably too far fetched so let's keep it simple
+ // for now.
+ //
+ const build_package_config* tpc (find (pc.name, tp->configs));
+
+ if (tpc == nullptr)
+ {
+ tpc = find ("default", tp->configs);
+
+ assert (tpc != nullptr); // Must always be present.
+ }
+
+ // Use the `all` class as a least restrictive default underlying
+ // build class set. Note that we should only apply the explicit
+ // build restrictions to the external test packages (think about
+ // the `builds: all` and `builds: -windows` manifest values for
+ // the primary and external test packages, respectively).
+ //
+ build_db_->load (*tp, tp->constraints_section);
+
+ if (exclude (*tpc,
+ tp->builds,
+ tp->constraints,
+ tc,
+ nullptr /* reason */,
+ true /* default_all_ucs */))
+ continue;
+
+ build_db_->load (*tp, tp->auxiliaries_section);
+
+ for (const build_auxiliary& ba:
+ tpc->effective_auxiliaries (tp->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
+ }
- if (i->timestamp <= (i->force == force_state::forced
- ? forced_rebuild_expiration
- : normal_rebuild_expiration))
- rebuilds.emplace_back (i.load ());
+ tests.emplace_back (td.name,
+ td.type,
+ td.buildtime,
+ td.constraint,
+ td.enable,
+ td.reflect);
+ }
+
+ vector<auxiliary_machine> tms;
+ vector<build_machine> bms;
+
+ if (size_t n = picked_machines.size ())
+ {
+ tms.reserve (n);
+ bms.reserve (n);
+
+ for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+ {
+ const machine_header_manifest& m (*pm.first.machine);
+ tms.push_back (auxiliary_machine {m.name, move (pm.second)});
+ bms.push_back (build_machine {m.name, m.summary});
}
}
- if (!configs.empty ())
+ return collect_auxiliaries_result {
+ move (tms), move (bms), move (tests)};
+ };
+
+ // While at it, collect the aborted for various reasons builds
+ // (interactive builds in multiple configurations, builds with too many
+ // auxiliary machines, etc) to send the notification emails at the end
+ // of the request handling.
+ //
+ struct aborted_build
+ {
+ shared_ptr<build> b;
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const char* what;
+ };
+ vector<aborted_build> aborted_builds;
+
+ // Note: is only used for crafting of the notification email subjects.
+ //
+ bool unforced (true);
+
+ for (bool done (false); !task_response.task && !done; )
+ {
+ transaction tr (conn->begin ());
+
+ // We need to be careful in the random package ordering mode not to
+ // miss the end after having wrapped around.
+ //
+ done = (start_offset != 0 &&
+ offset < start_offset &&
+ offset + limit >= start_offset);
+
+ if (done)
+ limit = start_offset - offset;
+
+ // Query (and cache) buildable packages.
+ //
+ auto packages (pkg_prep_query.execute ());
+
+ size_t chunk_size (packages.size ());
+ size_t next_offset (offset + chunk_size);
+
+ // If we are in the random package ordering mode, then also check if
+ // the package number has changed and, if that's the case, resize the
+ // tried positions list accordingly.
+ //
+ if (random &&
+ (next_offset > tried_positions.size () ||
+ (next_offset < tried_positions.size () && chunk_size < limit)))
+ {
+ resize_tried_positions (next_offset);
+ }
+
+ // Bail out if there is nothing left, unless we need to wrap around in
+ // the random package ordering mode.
+ //
+ if (chunk_size == 0)
+ {
+ tr.commit ();
+
+ if (start_offset != 0 && offset >= start_offset)
+ offset = 0;
+ else
+ done = true;
+
+ continue;
+ }
+
+ size_t position (offset); // Current package position.
+ offset = next_offset;
+
+ // Iterate over packages until we find one that needs building or have
+ // to bail out in the random package ordering mode for some reason (no
+ // more untried positions, need to restart, etc).
+ //
+ // Note that it is not uncommon for the sequentially examined packages
+ // to belong to the same tenant (single tenant mode, etc). Thus, we
+ // will cache the loaded tenant objects.
+ //
+ shared_ptr<build_tenant> t;
+
+ for (auto& bp: packages)
{
- // Find the first build configuration that is not excluded by the
- // package.
+ shared_ptr<build_package>& p (bp.package);
+
+ id = p->id;
+
+ // Reset the tenant cache if the current package belongs to a
+ // different tenant.
+ //
+ if (t != nullptr && t->id != id.tenant)
+ t = nullptr;
+
+ // If we are in the random package ordering mode, then cache the
+ // tenant the start offset refers to, if not cached yet, and check
+ // if we are still iterating over packages from this tenant
+ // otherwise. If the latter is not the case, then restart from a new
+ // random untried offset, if present, and bail out otherwise.
//
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
+ if (random)
+ {
+ if (!start_tenant)
+ {
+ start_tenant = id.tenant;
+ }
+ else if (*start_tenant != id.tenant)
+ {
+ if (optional<size_t> so = rand_position ())
+ {
+ start_offset = *so;
+ offset = start_offset;
+ start_tenant = nullopt;
+ limit = 50;
+ done = false;
+ }
+ else
+ done = true;
+
+ break;
+ }
- auto i (configs.begin ());
- auto e (configs.end ());
+ size_t pos (position++);
+
+ // Should have been resized, if required.
+ //
+ assert (pos < tried_positions.size ());
- for (;
- i != e &&
- exclude (p->builds, p->constraints, *i->second.config);
- ++i) ;
+ // Skip the position if it has already been tried.
+ //
+ if (tried_positions[pos])
+ continue;
+
+ position_tried (pos);
+ }
- if (i != e)
+ // Note that a request to interactively build a package in multiple
+ // configurations is most likely a mistake than a deliberate choice.
+ // Thus, for the interactive tenant let's check if the package can
+ // be built in multiple configurations. If that's the case then we
+ // will put all the potential builds into the aborted state and
+ // continue iterating looking for another package. Otherwise, just
+ // proceed for this package normally.
+ //
+ // It also feels like a good idea to archive an interactive tenant
+ // after a build object is created for it, regardless if the build
+ // task is issued or not. This way we make sure that an interactive
+ // build is never performed multiple times for such a tenant for any
+ // reason (multiple toolchains, buildtab change, etc). Note that the
+ // build result will still be accepted for an archived build.
+ //
+ if (bp.interactive)
{
- config_machine& cm (i->second);
- machine_header_manifest& mh (*cm.machine);
+ // Note that the tenant can be archived via some other package on
+ // some previous iteration. Skip the package if that's the case.
+ //
+ // Also note that if bp.archived is false, then we need to
+ // (re-)load the tenant object to re-check the archived flag.
+ //
+ if (!bp.archived)
+ {
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (id.tenant);
+
+ bp.archived = t->archived;
+ }
- build_id bid (move (id),
- cm.config->name,
- move (tqm.toolchain_name),
- toolchain_version);
+ if (bp.archived)
+ continue;
- shared_ptr<build> b (build_db_->find<build> (bid));
- optional<string> cl (challenge ());
+ assert (t != nullptr); // Wouldn't be here otherwise.
- // If build configuration doesn't exist then create the new one
- // and persist. Otherwise put it into the building state, refresh
- // the timestamp and update.
+ // Collect the potential build configurations as all combinations
+ // of the tenant's packages build configurations and the
+ // non-excluded (by the packages) build target
+ // configurations. Note that here we ignore the machines from the
+ // task request.
//
- if (b == nullptr)
+ struct build_config
{
- b = make_shared<build> (move (bid.package.tenant),
- move (bid.package.name),
- move (bp.version),
- move (bid.configuration),
- move (bid.toolchain_name),
- move (toolchain_version),
- move (agent_fp),
- move (cl),
- mh.name,
- move (mh.summary),
- cm.config->target);
-
- build_db_->persist (b);
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const build_target_config* tc;
+ };
+
+ small_vector<build_config, 1> build_configs;
+
+ // Note that we don't bother creating a prepared query here, since
+ // its highly unlikely to encounter multiple interactive tenants
+ // per task request. Given that we archive such tenants
+ // immediately, as a common case there will be none.
+ //
+ pkg_query pq (pkg_query::build_tenant::id == id.tenant);
+ for (auto& tp: build_db_->query<buildable_package> (pq))
+ {
+ shared_ptr<build_package>& p (tp.package);
+
+ build_db_->load (*p, p->constraints_section);
+
+ for (build_package_config& pc: p->configs)
+ {
+ for (const auto& tc: *target_conf_)
+ {
+ if (!exclude (pc, p->builds, p->constraints, tc))
+ build_configs.push_back (build_config {p, &pc, &tc});
+ }
+ }
}
- else
+
+ // If multiple build configurations are collected, then abort all
+ // the potential builds and continue iterating over the packages.
+ //
+ if (build_configs.size () > 1)
{
- // The package configuration is in the building state, and there
- // are no results.
- //
- // Note that in both cases we keep the status intact to be able
- // to compare it with the final one in the result request
- // handling in order to decide if to send the notification
- // email. The same is true for the forced flag (in the sense
- // that we don't set the force state to unforced).
+ // Abort the builds.
//
- // Load the section to assert the above statement.
+ for (build_config& c: build_configs)
+ {
+ shared_ptr<build_package>& p (c.p);
+ const string& pc (c.pc->name);
+ const build_target_config& tc (*c.tc);
+
+ build_id bid (p->id,
+ tc.target,
+ tc.name,
+ pc,
+ toolchain_name,
+ toolchain_version);
+
+ // Can there be any existing builds for such a tenant? Doesn't
+ // seem so, unless due to some manual intervention into the
+ // database. Anyway, let's just leave such a build alone.
+ //
+ shared_ptr<build> b (build_db_->find<build> (bid));
+
+ if (b == nullptr)
+ {
+ b = make_shared<build> (move (bid.package.tenant),
+ move (bid.package.name),
+ p->version,
+ move (bid.target),
+ move (bid.target_config_name),
+ move (bid.package_config_name),
+ move (bid.toolchain_name),
+ toolchain_version,
+ result_status::abort,
+ operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: multiple configurations "
+ "for interactive build\n"}}),
+ build_machine {
+ "brep", "build task module"});
+
+ build_db_->persist (b);
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (aborted_build {
+ move (b), move (p), c.pc, "build"});
+ }
+ }
+
+ // Archive the tenant.
//
- build_db_->load (*b, b->results_section);
+ t->archived = true;
+ build_db_->update (t);
- assert (b->state == build_state::building &&
- b->results.empty ());
+ continue; // Skip the package.
+ }
+ }
+
+ // If true, then the package is (being) built for some
+ // configurations.
+ //
+ // Note that since we only query the built and forced rebuild
+ // objects there can be false negatives.
+ //
+ bool package_built (false);
+
+ build_db_->load (*p, p->bot_keys_section);
+
+ for (const build_package_config& pc: p->configs)
+ {
+ // If this is a custom bot, then skip this configuration if it
+ // doesn't contain this bot's public key in its custom bot keys
+ // list. Otherwise (this is a default bot), skip this
+ // configuration if its custom bot keys list is not empty.
+ //
+ {
+ const build_package_bot_keys& bks (
+ pc.effective_bot_keys (p->bot_keys));
+
+ if (custom_bot)
+ {
+ assert (agent_fp); // Wouldn't be here otherwise.
+
+ if (find_if (
+ bks.begin (), bks.end (),
+ [&agent_fp] (const lazy_shared_ptr<build_public_key>& k)
+ {
+ return k.object_id ().fingerprint == *agent_fp;
+ }) == bks.end ())
+ {
+ continue;
+ }
+ }
+ else
+ {
+ if (!bks.empty ())
+ continue;
+ }
+ }
+
+ pkg_config = pc.name;
+
+ // Iterate through the built configurations and erase them from the
+ // build configuration map. All those configurations that remained
+ // can be built. We will take the first one, if present.
+ //
+ // Also save the built configurations for which it's time to be
+ // rebuilt.
+ //
+ config_machines configs (conf_machines); // Make copy for this pkg.
+ auto pkg_builds (bld_prep_query.execute ());
+
+ if (!package_built && !pkg_builds.empty ())
+ package_built = true;
- b->state = build_state::building;
+ for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
+ {
+ auto j (
+ configs.find (build_target_config_id {
+ i->id.target, i->id.target_config_name}));
- // Switch the force state not to reissue the task after the
- // forced rebuild timeout. Note that the result handler will
- // still recognize that the rebuild was forced.
+ // Outdated configurations are already excluded with the
+ // database query.
//
- if (b->force == force_state::forcing)
- b->force = force_state::forced;
+ assert (j != configs.end ());
+ configs.erase (j);
- b->agent_fingerprint = move (agent_fp);
- b->agent_challenge = move (cl);
- b->machine = mh.name;
- b->machine_summary = move (mh.summary);
- b->target = cm.config->target;
- b->timestamp = system_clock::now ();
+ if (i->state == build_state::built)
+ {
+ assert (i->force != force_state::forcing);
- build_db_->update (b);
+ if (needs_rebuild (*i))
+ rebuilds.emplace_back (i.load ());
+ }
}
- // Finally, prepare the task response manifest.
- //
- // We iterate over buildable packages.
+ if (!configs.empty ())
+ {
+ // Find the first build configuration that is not excluded by
+ // the package configuration and for which all the requested
+ // auxiliary machines can be provided.
+ //
+ const config_machine* cm (nullptr);
+ optional<collect_auxiliaries_result> aux;
+
+ build_db_->load (*p, p->constraints_section);
+
+ for (auto i (configs.begin ()), e (configs.end ()); i != e; ++i)
+ {
+ cm = &i->second;
+ const build_target_config& tc (*cm->config);
+
+ if (!exclude (pc, p->builds, p->constraints, tc))
+ {
+ if (!p->auxiliaries_section.loaded ())
+ build_db_->load (*p, p->auxiliaries_section);
+
+ if ((aux = collect_auxiliaries (p, pc, tc)))
+ break;
+ }
+ }
+
+ if (aux)
+ {
+ machine_header_manifest& mh (*cm->machine);
+
+ build_id bid (move (id),
+ cm->config->target,
+ cm->config->name,
+ move (pkg_config),
+ move (toolchain_name),
+ toolchain_version);
+
+ shared_ptr<build> b (build_db_->find<build> (bid));
+ optional<string> cl (challenge ());
+
+ // Move the interactive build login information into the build
+ // object, if the package to be built interactively.
+ //
+ optional<string> login (bp.interactive
+ ? move (tqm.interactive_login)
+ : nullopt);
+
+ // If build configuration doesn't exist then create the new
+ // one and persist. Otherwise put it into the building state,
+ // refresh the timestamp and update.
+ //
+ if (b == nullptr)
+ {
+ b = make_shared<build> (move (bid.package.tenant),
+ move (bid.package.name),
+ p->version,
+ move (bid.target),
+ move (bid.target_config_name),
+ move (bid.package_config_name),
+ move (bid.toolchain_name),
+ move (toolchain_version),
+ move (login),
+ move (agent_fp),
+ move (cl),
+ build_machine {
+ mh.name, move (mh.summary)},
+ move (aux->build_auxiliary_machines),
+ controller_checksum (*cm->config),
+ machine_checksum (*cm->machine));
+
+ build_db_->persist (b);
+ }
+ else
+ {
+ // The build configuration is in the building or queued
+ // state.
+ //
+ // Note that in both the building and built cases we keep
+ // the status intact to be able to compare it with the final
+ // one in the result request handling in order to decide if
+ // to send the notification email or to revert it to the
+ // built state if interrupted. The same is true for the
+ // forced flag (in the sense that we don't set the force
+ // state to unforced).
+ //
+ assert (b->state != build_state::built);
+
+ initial_state = b->state;
+
+ b->state = build_state::building;
+ b->interactive = move (login);
+
+ unforced = (b->force == force_state::unforced);
+
+ // Switch the force state not to reissue the task after the
+ // forced rebuild timeout. Note that the result handler will
+ // still recognize that the rebuild was forced.
+ //
+ if (b->force == force_state::forcing)
+ {
+ b->force = force_state::forced;
+ rebuild_forced_build = true;
+ }
+
+ b->agent_fingerprint = move (agent_fp);
+ b->agent_challenge = move (cl);
+ b->machine = build_machine {mh.name, move (mh.summary)};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
+
+ string ccs (controller_checksum (*cm->config));
+ string mcs (machine_checksum (*cm->machine));
+
+ // Issue the hard rebuild if it is forced or the
+ // configuration or machine has changed.
+ //
+ if (b->hard_timestamp <= hard_rebuild_expiration ||
+ b->force == force_state::forced ||
+ b->controller_checksum != ccs ||
+ b->machine_checksum != mcs)
+ convert_to_hard (b);
+
+ b->controller_checksum = move (ccs);
+ b->machine_checksum = move (mcs);
+
+ b->timestamp = system_clock::now ();
+
+ build_db_->update (b);
+ }
+
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (b->tenant);
+
+ // Archive an interactive tenant.
+ //
+ if (bp.interactive)
+ {
+ t->archived = true;
+ build_db_->update (t);
+ }
+
+ // Finally, stash the service notification information, if
+ // present, and prepare the task response manifest.
+ //
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ const tenant_service_base* s (i->second.get ());
+
+ tsb = dynamic_cast<const tenant_service_build_building*> (s);
+ tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+ if (tsq != nullptr)
+ {
+ qbs = queue_builds (*p, *b);
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued() callback,
+ // then also set the package tenant's queued timestamp
+ // to the current time to prevent the notifications race
+ // (see tenant::queued_timestamp for details).
+ //
+ if (!qbs.empty () ||
+ !initial_state ||
+ (*initial_state != build_state::queued &&
+ !rebuild_forced_build))
+ {
+ qhs = queue_hints (*p);
+
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (*t->service, b);
+ }
+ }
+
+ task_response = task (*b,
+ *p,
+ pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (bp.interactive),
+ *cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = &pc;
+
+ package_built = true;
+
+ break; // Bail out from the package configurations loop.
+ }
+ }
+ }
+
+ // If the task manifest is prepared, then bail out from the package
+ // loop, commit the transaction and respond. Otherwise, stash the
+ // build toolchain into the tenant, unless it is already stashed or
+ // the current package already has some configurations (being)
+ // built.
+ //
+ if (!task_response.task)
+ {
+ // Note that since there can be false negatives for the
+ // package_built flag (see above), there can be redundant tenant
+ // queries which, however, seems harmless (query uses the primary
+ // key and the object memory footprint is small).
//
- assert (p->internal_repository != nullptr);
+ if (!package_built)
+ {
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (p->id.tenant);
- p->internal_repository.load ();
+ if (!t->toolchain)
+ {
+ t->toolchain = build_toolchain {toolchain_name,
+ toolchain_version};
- tsm = task (move (b), move (p), cm);
+ build_db_->update (t);
+ }
+ }
}
+ else
+ break;
}
- // If the task response manifest is prepared, then bail out from the
- // package loop, commit the transaction and respond.
- //
- if (!tsm.session.empty ())
- break;
+ tr.commit ();
}
- t.commit ();
- }
-
- // If we don't have an unbuilt package, then let's see if we have a
- // package to rebuild.
- //
- if (tsm.session.empty () && !rebuilds.empty ())
- {
- // Sort the package configuration rebuild list with the following sort
- // priority:
+ // If we don't have an unbuilt package, then let's see if we have a
+ // build configuration to rebuild.
//
- // 1: force state
- // 2: overall status
- // 3: timestamp (less is preferred)
+ if (!task_response.task && !rebuilds.empty ())
+ {
+ // Sort the configuration rebuild list with the following sort
+ // priority:
+ //
+ // 1: force state
+ // 2: overall status
+ // 3: timestamp (less is preferred)
+ //
+ auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y)
+ {
+ if (x->force != y->force)
+ return x->force > y->force; // Forced goes first.
+
+ assert (x->status && y->status); // Both built.
+
+ if (x->status != y->status)
+ return x->status > y->status; // Larger status goes first.
+
+ // Older build completion goes first.
+ //
+ // Note that a completed build can have the state change timestamp
+ // (timestamp member) newer than the completion timestamp
+ // (soft_timestamp member) if the build was interrupted.
+ //
+ return x->soft_timestamp < y->soft_timestamp;
+ };
+
+ sort (rebuilds.begin (), rebuilds.end (), cmp);
+
+ optional<string> cl (challenge ());
+
+ // Pick the first build configuration from the ordered list.
+ //
+ // Note that the configurations and packages may not match the
+ // required criteria anymore (as we have committed the database
+ // transactions that were used to collect this data) so we recheck. If
+ // we find one that matches then put it into the building state,
+ // refresh the timestamp and update. Note that we don't amend the
+ // status and the force state to have them available in the result
+ // request handling (see above).
+ //
+ for (auto& b: rebuilds)
+ {
+ try
+ {
+ transaction t (conn->begin ());
+
+ b = build_db_->find<build> (b->id);
+
+ if (b != nullptr &&
+ b->state == build_state::built &&
+ needs_rebuild (*b))
+ {
+ auto i (conf_machines.find (
+ build_target_config_id {
+ b->target, b->target_config_name}));
+
+ // Only actual package configurations are loaded (see above).
+ //
+ assert (i != conf_machines.end ());
+ const config_machine& cm (i->second);
+
+ // Rebuild the package configuration if still present, is
+ // buildable, doesn't exclude the target configuration, can be
+ // provided with all the requested auxiliary machines, and
+ // matches the request's interactive mode.
+ //
+ // Note that while change of the latter seems rather far fetched,
+ // let's check it for good measure.
+ //
+ shared_ptr<build_package> p (
+ build_db_->find<build_package> (b->id.package));
+
+ shared_ptr<build_tenant> t (
+ p != nullptr
+ ? build_db_->load<build_tenant> (p->id.tenant)
+ : nullptr);
+
+ build_package_config* pc (p != nullptr
+ ? find (b->package_config_name,
+ p->configs)
+ : nullptr);
+
+ if (pc != nullptr &&
+ p->buildable &&
+ (imode == interactive_mode::both ||
+ (t->interactive.has_value () ==
+ (imode == interactive_mode::true_))))
+ {
+ const build_target_config& tc (*cm.config);
+
+ build_db_->load (*p, p->constraints_section);
+
+ if (exclude (*pc, p->builds, p->constraints, tc))
+ continue;
+
+ build_db_->load (*p, p->auxiliaries_section);
+
+ if (optional<collect_auxiliaries_result> aux =
+ collect_auxiliaries (p, *pc, tc))
+ {
+ assert (b->status);
+
+ initial_state = build_state::built;
+
+ rebuild_interrupted_rebuild =
+ (b->timestamp > b->soft_timestamp);
+
+ b->state = build_state::building;
+
+ // Save the interactive build login information into the
+ // build object, if the package to be built interactively.
+ //
+ // Can't move from, as may need it on the next iteration.
+ //
+ b->interactive = t->interactive
+ ? tqm.interactive_login
+ : nullopt;
+
+ unforced = (b->force == force_state::unforced);
+
+ // Can't move from, as may need them on the next iteration.
+ //
+ b->agent_fingerprint = agent_fp;
+ b->agent_challenge = cl;
+
+ const machine_header_manifest& mh (*cm.machine);
+ b->machine = build_machine {mh.name, mh.summary};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
+
+ // Issue the hard rebuild if the timeout expired, rebuild is
+ // forced, or the configuration or machine has changed.
+ //
+ // Note that we never reset the build status (see above for
+ // the reasoning).
+ //
+ string ccs (controller_checksum (*cm.config));
+ string mcs (machine_checksum (*cm.machine));
+
+ if (b->hard_timestamp <= hard_rebuild_expiration ||
+ b->force == force_state::forced ||
+ b->controller_checksum != ccs ||
+ b->machine_checksum != mcs)
+ convert_to_hard (b);
+
+ b->controller_checksum = move (ccs);
+ b->machine_checksum = move (mcs);
+
+ b->timestamp = system_clock::now ();
+
+ build_db_->update (b);
+
+ // Stash the service notification information, if present,
+ // and prepare the task response manifest.
+ //
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ const tenant_service_base* s (i->second.get ());
+
+ tsb = dynamic_cast<const tenant_service_build_building*> (s);
+ tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+ if (tsq != nullptr)
+ {
+ qbs = queue_builds (*p, *b);
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued()
+ // callback, then also set the package tenant's queued
+ // timestamp to the current time to prevent the
+ // notifications race (see tenant::queued_timestamp
+ // for details).
+ //
+ if (!qbs.empty () || !rebuild_interrupted_rebuild)
+ {
+ qhs = queue_hints (*p);
+
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (move (*t->service), b);
+ }
+ }
+
+ task_response = task (*b,
+ *p,
+ *pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (t->interactive),
+ cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = pc;
+ }
+ }
+ }
+
+ t.commit ();
+ }
+ catch (const odb::deadlock&)
+ {
+ // Just try with the next rebuild. But first, reset the task
+ // manifest and the session that we may have prepared.
+ //
+ task_response = task_response_manifest ();
+ }
+
+ // If the task manifest is prepared, then bail out from the package
+ // configuration rebuilds loop and respond.
+ //
+ if (task_response.task)
+ break;
+ }
+ }
+
+ // If the tenant-associated third-party service needs to be notified
+ // about the queued builds, then call the
+ // tenant_service_build_queued::build_queued() callback function and
+ // update the service state, if requested.
//
- auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y)
+ if (tsq != nullptr)
{
- if (x->force != y->force)
- return x->force > y->force; // Forced goes first.
+ assert (tss); // Wouldn't be here otherwise.
- assert (x->status && y->status); // Both built.
+ const tenant_service& ss (tss->first);
- if (x->status != y->status)
- return x->status > y->status; // Larger status goes first.
+ // If the task build has no initial state (is just created), then
+ // temporarily move it into the list of the queued builds until the
+ // `queued` notification is delivered. Afterwards, restore it so that
+ // the `building` notification can also be sent.
+ //
+ build& b (*tss->second);
+ bool restore_build (false);
- return x->timestamp < y->timestamp; // Older goes first.
- };
+ if (!initial_state)
+ {
+ qbs.push_back (move (b));
+ restore_build = true;
+ }
+
+ if (!qbs.empty ())
+ {
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ nullopt /* initial_state */,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- sort (rebuilds.begin (), rebuilds.end (), cmp);
+ // Send the `queued` notification for the task build, unless it is
+ // already sent, and update the service state, if requested.
+ //
+ if (initial_state &&
+ *initial_state != build_state::queued &&
+ !rebuild_interrupted_rebuild &&
+ !rebuild_forced_build)
+ {
+ qbs.clear ();
+ qbs.push_back (move (b));
+ restore_build = true;
+
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ initial_state,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- optional<string> cl (challenge ());
+ if (restore_build)
+ b = move (qbs.back ());
+ }
- // Pick the first package configuration from the ordered list.
+ // If a third-party service needs to be notified about the package
+ // build, then call the tenant_service_build_built::build_building()
+ // callback function and, if requested, update the tenant-associated
+ // service state.
//
- // Note that the configurations and packages may not match the required
- // criteria anymore (as we have committed the database transactions that
- // were used to collect this data) so we recheck. If we find one that
- // matches then put it into the building state, refresh the timestamp and
- // update. Note that we don't amend the status and the force state to
- // have them available in the result request handling (see above).
+ if (tsb != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
+
+ if (auto f = tsb->build_building (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
+ }
+
+ // If the task manifest is prepared, then check that the number of the
+ // build auxiliary machines is less than 10. If that's not the case,
+ // then turn the build into the built state with the abort status.
//
- for (auto& b: rebuilds)
+ if (task_response.task &&
+ task_response.task->auxiliary_machines.size () > 9)
{
- try
+ // Respond with the no-task manifest.
+ //
+ task_response = task_response_manifest ();
+
+ // If the package tenant has a third-party service state associated
+ // with it, then check if the tenant_service_build_built callback is
+ // registered for the type of the associated service. If it is, then
+ // stash the state, the build object, and the callback pointer for the
+ // subsequent service `built` notification.
+ //
+ const tenant_service_build_built* tsb (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
- b = build_db_->find<build> (b->id);
+ shared_ptr<build> b (build_db_->find<build> (task_build->id));
- if (b != nullptr && b->state == build_state::built &&
- b->timestamp <= (b->force == force_state::forced
- ? forced_rebuild_expiration
- : normal_rebuild_expiration))
+ // For good measure, check that the build object is in the building
+ // state and has not been updated.
+ //
+ if (b->state == build_state::building &&
+ b->timestamp == task_build->timestamp)
{
- auto i (cfg_machines.find (b->id.configuration.c_str ()));
+ b->state = build_state::built;
+ b->status = result_status::abort;
+ b->force = force_state::unforced;
- // Only actual package configurations are loaded (see above).
+ // Cleanup the interactive build login information.
//
- assert (i != cfg_machines.end ());
- const config_machine& cm (i->second);
+ b->interactive = nullopt;
- // Rebuild the package if still present, is buildable and doesn't
- // exclude the configuration.
+ // Cleanup the authentication data.
//
- shared_ptr<build_package> p (
- build_db_->find<build_package> (b->id.package));
+ b->agent_fingerprint = nullopt;
+ b->agent_challenge = nullopt;
- if (p != nullptr &&
- p->internal_repository != nullptr &&
- !exclude (p->builds, p->constraints, *cm.config))
- {
- assert (b->status);
-
- b->state = build_state::building;
-
- // Can't move from, as may need them on the next iteration.
- //
- b->agent_fingerprint = agent_fp;
- b->agent_challenge = cl;
+ b->timestamp = system_clock::now ();
+ b->soft_timestamp = b->timestamp;
+ b->hard_timestamp = b->soft_timestamp;
- const machine_header_manifest& mh (*cm.machine);
- b->machine = mh.name;
- b->machine_summary = mh.summary;
-
- b->target = cm.config->target;
+ // Mark the section as loaded, so results are updated.
+ //
+ b->results_section.load ();
- // Mark the section as loaded, so results are updated.
- //
- b->results_section.load ();
- b->results.clear ();
+ b->results = operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: not more than 9 auxiliary machines are allowed"}});
- b->timestamp = system_clock::now ();
+ b->agent_checksum = nullopt;
+ b->worker_checksum = nullopt;
+ b->dependency_checksum = nullopt;
- build_db_->update (b);
+ build_db_->update (b);
- p->internal_repository.load ();
+ // Schedule the `built` notification, if the
+ // tenant_service_build_built callback is registered for the
+ // tenant.
+ //
+ shared_ptr<build_tenant> t (
+ build_db_->load<build_tenant> (b->tenant));
- tsm = task (move (b), move (p), cm);
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsb = dynamic_cast<const tenant_service_build_built*> (
+ i->second.get ());
+
+ // If required, stash the service notification information.
+ //
+ if (tsb != nullptr)
+ tss = make_pair (move (*t->service), b);
+ }
}
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (
+ aborted_build {move (b),
+ move (task_package),
+ task_config,
+ unforced ? "build" : "rebuild"});
}
t.commit ();
}
- catch (const odb::deadlock&) {} // Just try with the next rebuild.
- // If the task response manifest is prepared, then bail out from the
- // package configuration rebuilds loop and respond.
+ // If a third-party service needs to be notified about the built
+ // package, then call the tenant_service_build_built::build_built()
+ // callback function and update the service state, if requested.
//
- if (!tsm.session.empty ())
- break;
+ if (tsb != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
+
+ if (auto f = tsb->build_built (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
+ }
}
+
+ // Send notification emails for all the aborted builds.
+ //
+ for (const aborted_build& ab: aborted_builds)
+ send_notification_email (*options_,
+ conn,
+ *ab.b,
+ *ab.p,
+ *ab.pc,
+ ab.what,
+ error,
+ verb_ >= 2 ? &trace : nullptr);
}
}
- // @@ Probably it would be a good idea to also send some cache control
- // headers to avoid caching by HTTP proxies. That would require extension
- // of the web::response interface.
- //
-
- manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
- "task_response_manifest");
- tsm.serialize (s);
-
+ serialize_task_response_manifest ();
return true;
}
diff --git a/mod/mod-build-task.hxx b/mod/mod-build-task.hxx
index 3721363..d0b3d44 100644
--- a/mod/mod-build-task.hxx
+++ b/mod/mod-build-task.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-build-task.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILD_TASK_HXX
@@ -8,7 +7,8 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
@@ -17,13 +17,13 @@ namespace brep
class build_task: public database_module, private build_config_module
{
public:
- build_task () = default;
+ explicit
+ build_task (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_task (const build_task&);
+ build_task (const build_task&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -37,6 +37,7 @@ namespace brep
private:
shared_ptr<options::build_task> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx
index e749461..30562f3 100644
--- a/mod/mod-builds.cxx
+++ b/mod/mod-builds.cxx
@@ -1,25 +1,25 @@
// file : mod/mod-builds.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-builds.hxx>
#include <set>
-#include <algorithm> // find_if()
#include <libstudxml/serializer.hxx>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_string()
-#include <libbutl/path-pattern.mxx>
+#include <libbutl/utility.hxx> // compare_c_string
+#include <libbutl/timestamp.hxx> // to_string()
+#include <libbutl/path-pattern.hxx>
#include <libbbot/manifest.hxx> // to_result_status(), to_string(result_status)
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
@@ -27,11 +27,10 @@
#include <libbrep/build-package-odb.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
using namespace butl;
-using namespace bbot;
using namespace web;
using namespace odb::core;
using namespace brep::cli;
@@ -110,7 +109,11 @@ transform (const string& pattern)
case '|':
case '+':
case '{':
- case '(': r += '\\'; break;
+ case '}':
+ case '(':
+ case ')':
+ case '[':
+ case ']': r += '\\'; break;
}
r += c;
@@ -129,28 +132,32 @@ match (const C qc, const string& pattern)
return qc + "SIMILAR TO" + query<T>::_val (transform (pattern));
}
+// If tenant is absent, then query builds from all the public tenants.
+//
template <typename T>
static inline query<T>
-build_query (const brep::cstrings* configs,
+build_query (const brep::vector<brep::build_target_config_id>* config_ids,
const brep::params::builds& params,
- const brep::optional<brep::string>& tenant,
- const brep::optional<bool>& archived)
+ const brep::optional<brep::string>& tenant)
{
using namespace brep;
using query = query<T>;
using qb = typename query::build;
-
- query q (configs != nullptr
- ? qb::id.configuration.in_range (configs->begin (), configs->end ())
- : query (true));
+ using qt = typename query::build_tenant;
const auto& pid (qb::id.package);
- if (tenant)
- q = q && pid.tenant == *tenant;
+ query q (tenant ? pid.tenant == *tenant : !qt::private_);
+
+ if (config_ids != nullptr)
+ {
+ query sq (false);
+ for (const auto& id: *config_ids)
+ sq = sq || (qb::id.target == id.target &&
+ qb::id.target_config_name == id.config);
- if (archived)
- q = q && query::build_tenant::archived == *archived;
+ q = q && sq;
+ }
// Note that there is no error reported if the filter parameters parsing
// fails. Instead, it is considered that no package builds match such a
@@ -169,7 +176,7 @@ build_query (const brep::cstrings* configs,
{
// May throw invalid_argument.
//
- version v (params.version (), false /* fold_zero_revision */);
+ version v (params.version (), version::none);
q = q && compare_version_eq (pid.version,
canonical_version (v),
@@ -178,11 +185,11 @@ build_query (const brep::cstrings* configs,
// Build toolchain name/version.
//
- const string& tc (params.toolchain ());
+ const string& th (params.toolchain ());
- if (tc != "*")
+ if (th != "*")
{
- size_t p (tc.find ('-'));
+ size_t p (th.find ('-'));
if (p == string::npos) // Invalid format.
throw invalid_argument ("");
@@ -190,8 +197,8 @@ build_query (const brep::cstrings* configs,
// the exact version revision, so an absent and zero revisions have the
// same semantics and the zero revision is folded.
//
- string tn (tc, 0, p);
- version tv (string (tc, p + 1)); // May throw invalid_argument.
+ string tn (th, 0, p);
+ version tv (string (th, p + 1)); // May throw invalid_argument.
q = q &&
qb::id.toolchain_name == tn &&
@@ -200,35 +207,44 @@ build_query (const brep::cstrings* configs,
true /* revision */);
}
- // Build configuration name.
+ // Build target.
//
- if (!params.configuration ().empty ())
- q = q && match<T> (qb::id.configuration, params.configuration ());
+ if (!params.target ().empty ())
+ q = q && match<T> (qb::id.target, params.target ());
- // Build machine name.
+ // Build target configuration name.
//
- if (!params.machine ().empty ())
- q = q && match<T> (qb::machine, params.machine ());
+ if (!params.target_config ().empty ())
+ q = q && match<T> (qb::id.target_config_name, params.target_config ());
- // Build target.
+ // Build package configuration name.
//
- if (!params.target ().empty ())
- q = q && match<T> (qb::target, params.target ());
+ if (!params.package_config ().empty ())
+ q = q && match<T> (qb::id.package_config_name, params.package_config ());
// Build result.
//
const string& rs (params.result ());
+ bool add_state (true);
if (rs != "*")
{
if (rs == "pending")
+ {
q = q && qb::force != "unforced";
+ }
else if (rs == "building")
+ {
q = q && qb::state == "building";
+ add_state = false;
+ }
else
{
query sq (qb::status == rs);
- result_status st (to_result_status(rs)); // May throw invalid_argument.
+
+ // May throw invalid_argument.
+ //
+ result_status st (bbot::to_result_status (rs));
if (st != result_status::success)
{
@@ -249,8 +265,12 @@ build_query (const brep::cstrings* configs,
// well (rebuild).
//
q = q && qb::state == "built" && sq;
+ add_state = false;
}
}
+
+ if (add_state)
+ q = q && qb::state != "queued";
}
catch (const invalid_argument&)
{
@@ -260,23 +280,19 @@ build_query (const brep::cstrings* configs,
return q;
}
+// If tenant is absent, then query packages from all the public tenants.
+//
template <typename T>
static inline query<T>
package_query (const brep::params::builds& params,
- const brep::optional<brep::string>& tenant,
- const brep::optional<bool>& archived)
+ const brep::optional<brep::string>& tenant)
{
using namespace brep;
using query = query<T>;
using qp = typename query::build_package;
+ using qt = typename query::build_tenant;
- query q (true);
-
- if (tenant)
- q = q && qp::id.tenant == *tenant;
-
- if (archived)
- q = q && query::build_tenant::archived == *archived;
+ query q (tenant ? qp::id.tenant == *tenant : !qt::private_);
// Note that there is no error reported if the filter parameters parsing
// fails. Instead, it is considered that no packages match such a query.
@@ -294,7 +310,7 @@ package_query (const brep::params::builds& params,
{
// May throw invalid_argument.
//
- version v (params.version (), false /* fold_zero_revision */);
+ version v (params.version (), version::none);
q = q && compare_version_eq (qp::id.version,
canonical_version (v),
@@ -309,22 +325,6 @@ package_query (const brep::params::builds& params,
return q;
}
-template <typename T, typename ID>
-static inline query<T>
-package_id_eq (const ID& x, const brep::package_id& y)
-{
- using query = query<T>;
- const auto& qv (x.version);
-
- return
- x.tenant == query::_ref (y.tenant) &&
- x.name == query::_ref (y.name) &&
- qv.epoch == query::_ref (y.version.epoch) &&
- qv.canonical_upstream == query::_ref (y.version.canonical_upstream) &&
- qv.canonical_release == query::_ref (y.version.canonical_release) &&
- qv.revision == query::_ref (y.version.revision);
-}
-
static const vector<pair<string, string>> build_results ({
{"unbuilt", "<unbuilt>"},
{"*", "*"},
@@ -364,11 +364,6 @@ handle (request& rq, response& rs)
throw invalid_request (400, e.what ());
}
- // Override the name parameter for the old URL (see options.cli for details).
- //
- if (params.name_legacy_specified ())
- params.name (params.name_legacy ());
-
const char* title ("Builds");
xml::serializer s (rs.content (), title);
@@ -392,14 +387,17 @@ handle (request& rq, response& rs)
<< DIV(ID="content");
// If the tenant is empty then we are in the global view and will display
- // builds from all the tenants.
+ // builds from all the public tenants.
//
optional<string> tn;
if (!tenant.empty ())
tn = tenant;
- // Return the list of distinct toolchain name/version pairs. The build db
- // transaction must be started.
+ // Return the list of distinct toolchain name/version pairs. If no builds
+ // are present for the tenant, then fallback to the toolchain recorded in
+ // the tenant object, if present.
+ //
+ // Note: the build db transaction must be started.
//
using toolchains = vector<pair<string, version>>;
@@ -415,11 +413,19 @@ handle (request& rq, response& rs)
false /* first */)))
r.emplace_back (move (t.name), move (t.version));
+ if (r.empty ())
+ {
+ shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tenant));
+
+ if (t != nullptr && t->toolchain)
+ r.emplace_back (t->toolchain->name, t->toolchain->version);
+ }
+
return r;
};
auto print_form = [&s, &params, this] (const toolchains& toolchains,
- size_t build_count)
+ optional<size_t> build_count)
{
// Print the package builds filter form on the first page only.
//
@@ -430,16 +436,16 @@ handle (request& rq, response& rs)
// the selected toolchain is still present in the database. Otherwise
// fallback to the * wildcard selection.
//
- string ctc ("*");
+ string cth ("*");
vector<pair<string, string>> toolchain_opts ({{"*", "*"}});
{
for (const auto& t: toolchains)
{
- string tc (t.first + '-' + t.second.string ());
- toolchain_opts.emplace_back (tc, tc);
+ string th (t.first + '-' + t.second.string ());
+ toolchain_opts.emplace_back (th, th);
- if (tc == params.toolchain ())
- ctc = move (tc);
+ if (th == params.toolchain ())
+ cth = move (th);
}
}
@@ -455,28 +461,42 @@ handle (request& rq, response& rs)
<< TBODY
<< TR_INPUT ("name", "builds", params.name (), "*", true)
<< TR_INPUT ("version", "pv", params.version (), "*")
- << TR_SELECT ("toolchain", "tc", ctc, toolchain_opts)
+ << TR_SELECT ("toolchain", "th", cth, toolchain_opts)
+ << TR_INPUT ("target", "tg", params.target (), "*")
- << TR(CLASS="config")
- << TH << "config" << ~TH
+ << TR(CLASS="tgt-config")
+ << TH << "tgt config" << ~TH
<< TD
<< *INPUT(TYPE="text",
- NAME="cf",
- VALUE=params.configuration (),
+ NAME="tc",
+ VALUE=params.target_config (),
PLACEHOLDER="*",
- LIST="configs")
- << DATALIST(ID="configs")
+ LIST="target-configs")
+ << DATALIST(ID="target-configs")
<< *OPTION(VALUE="*");
- for (const auto& c: *build_conf_names_)
- s << *OPTION(VALUE=c);
+ // Print unique config names from the target config map.
+ //
+ set<const char*, butl::compare_c_string> conf_names;
+ for (const auto& c: *target_conf_map_)
+ {
+ if (conf_names.insert (c.first.config.get ().c_str ()).second)
+ s << *OPTION(VALUE=c.first.config.get ());
+ }
s << ~DATALIST
<< ~TD
<< ~TR
- << TR_INPUT ("machine", "mn", params.machine (), "*")
- << TR_INPUT ("target", "tg", params.target (), "*")
+ << TR(CLASS="pkg-config")
+ << TH << "pkg config" << ~TH
+ << TD
+ << *INPUT(TYPE="text",
+ NAME="pc",
+ VALUE=params.package_config (),
+ PLACEHOLDER="*")
+ << ~TD
+ << ~TR
<< TR_SELECT ("result", "rs", params.result (), build_results)
<< ~TBODY
<< ~TABLE
@@ -498,26 +518,25 @@ handle (request& rq, response& rs)
s << DIV_COUNTER (build_count, "Build", "Builds");
};
+ const string& tgt (params.target ());
+ const string& tgt_cfg (params.target_config ());
+ const string& pkg_cfg (params.package_config ());
+
// We will not display hidden configurations, unless the configuration is
// specified explicitly.
//
- bool exclude_hidden (params.configuration ().empty () ||
- path_pattern (params.configuration ()));
+ bool exclude_hidden (tgt_cfg.empty () || path_pattern (tgt_cfg));
- cstrings conf_names;
+ vector<build_target_config_id> conf_ids;
+ conf_ids.reserve (target_conf_map_->size ());
- if (exclude_hidden)
+ for (const auto& c: *target_conf_map_)
{
- for (const auto& c: *build_conf_map_)
- {
- if (belongs (*c.second, "all"))
- conf_names.push_back (c.first);
- }
+ if (!exclude_hidden || !belongs (*c.second, "hidden"))
+ conf_ids.push_back (c.first);
}
- else
- conf_names = *build_conf_names_;
- size_t count;
+ optional<size_t> count;
size_t page (params.page ());
if (params.result () != "unbuilt") // Print package build configurations.
@@ -532,37 +551,22 @@ handle (request& rq, response& rs)
// printing the builds.
//
count = 0;
- vector<shared_ptr<build>> builds;
+ vector<package_build> builds;
builds.reserve (page_configs);
- // Prepare the package build prepared query.
+ // Prepare the package build query.
//
using query = query<package_build>;
- using prep_query = prepared_query<package_build>;
- query q (build_query<package_build> (
- &conf_names, params, tn, nullopt /* archived */));
-
- // Specify the portion. Note that we will be querying builds in chunks,
- // not to hold locks for too long.
- //
- // Also note that for each build we also load the corresponding
- // package. Nevertheless, we use a fairly large portion to speed-up the
- // builds traversal but also cache the package objects (see below).
- //
- size_t offset (0);
+ query q (build_query<package_build> (&conf_ids, params, tn));
// Print package build configurations ordered by the timestamp (later goes
// first).
//
- q += "ORDER BY" + query::build::timestamp + "DESC" +
- "OFFSET" + query::_ref (offset) + "LIMIT 500";
+ q += "ORDER BY" + query::build::timestamp + "DESC";
connection_ptr conn (build_db_->connection ());
- prep_query pq (
- conn->prepare_query<package_build> ("mod-builds-query", q));
-
// Note that we can't skip the proper number of builds in the database
// query for a page numbers greater than one. So we will query builds from
// the very beginning and skip the appropriate number of them while
@@ -578,81 +582,101 @@ handle (request& rq, response& rs)
//
session sn;
- for (bool ne (true); ne; )
+ transaction t (conn->begin ());
+
+ // For some reason PostgreSQL (as of 9.4) picks the nested loop join
+ // strategy for the below package_build query, which executes quite slow
+ // even for reasonably small number of builds. Thus, we just discourage
+ // PostgreSQL from using this strategy in the current transaction.
+ //
+ // @@ TMP Re-check for the later PostgreSQL versions if we can drop this
+ // hint. If drop, then also grep for other places where this hint
+ // is used.
+ //
+ conn->execute ("SET LOCAL enable_nestloop=off");
+
+ // Iterate over builds and cache build objects that should be printed.
+ // Skip the appropriate number of them (for page number greater than
+ // one).
+ //
+ for (auto& pb: build_db_->query<package_build> (q))
{
- transaction t (conn->begin ());
+ shared_ptr<build>& b (pb.build);
+
+ auto i (
+ target_conf_map_->find (
+ build_target_config_id {b->target, b->target_config_name}));
- // Query package builds (and cache the result).
+ assert (i != target_conf_map_->end ());
+
+ // Match the target configuration against the package build
+ // configuration expressions/constraints.
//
- auto bs (pq.execute ());
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ const build_package_config* pc (find (b->package_config_name,
+ p->configs));
- if ((ne = !bs.empty ()))
+ // The package configuration should be present since the configurations
+ // set cannot change if the package version doesn't change. If that's
+ // not the case, then the database has probably been manually amended.
+ // In this case let's just skip such a build as if it excluded and log
+ // the warning.
+ //
+ if (pc == nullptr)
{
- offset += bs.size ();
+ warn << "cannot find configuration '" << b->package_config_name
+ << "' for package " << p->id.name << '/' << p->version;
- // Iterate over builds and cache build objects that should be printed.
- // Skip the appropriate number of them (for page number greater than
- // one).
- //
- for (auto& pb: bs)
- {
- shared_ptr<build>& b (pb.build);
+ continue;
+ }
- auto i (build_conf_map_->find (b->configuration.c_str ()));
- assert (i != build_conf_map_->end ());
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
- // Match the configuration against the package build
- // expressions/constraints.
+ if (!exclude (*pc, p->builds, p->constraints, *i->second))
+ {
+ if (skip != 0)
+ --skip;
+ else if (print != 0)
+ {
+ // As we query builds in multiple transactions we may see the same
+ // build multiple times. Let's skip the duplicates. Note: we don't
+ // increment the counter in this case.
//
- shared_ptr<build_package> p (
- build_db_->load<build_package> (b->id.package));
-
- if (!exclude (p->builds, p->constraints, *i->second))
+ if (find_if (builds.begin (), builds.end (),
+ [&b] (const package_build& pb)
+ {
+ return b->id == pb.build->id;
+ }) != builds.end ())
+ continue;
+
+ if (b->state == build_state::built)
{
- if (skip != 0)
- --skip;
- else if (print != 0)
- {
- // As we query builds in multiple transactions we may see the
- // same build multiple times. Let's skip the duplicates. Note:
- // we don't increment the counter in this case.
- //
- if (find_if (builds.begin (),
- builds.end (),
- [&b] (const shared_ptr<build>& pb)
- {
- return b->id == pb->id;
- }) != builds.end ())
- continue;
-
- if (b->state == build_state::built)
- {
- build_db_->load (*b, b->results_section);
+ build_db_->load (*b, b->results_section);
- // Let's clear unneeded result logs for builds being cached.
- //
- for (operation_result& r: b->results)
- r.log.clear ();
- }
+ // Let's clear unneeded result logs for builds being cached.
+ //
+ for (operation_result& r: b->results)
+ r.log.clear ();
+ }
- builds.push_back (move (b));
+ builds.push_back (move (pb));
- --print;
- }
-
- ++count;
- }
+ --print;
}
+
+ ++(*count);
}
+ }
- // Print the filter form after the build count is calculated. Note:
- // query_toolchains() must be called inside the build db transaction.
- //
- else
- print_form (query_toolchains (), count);
+ // Print the filter form after the build count is calculated. Note:
+ // query_toolchains() must be called inside the build db transaction.
+ //
+ print_form (query_toolchains (), count);
- t.commit ();
- }
+ t.commit ();
// Finally, print the cached package build configurations.
//
@@ -661,34 +685,43 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- for (const shared_ptr<build>& pb: builds)
+ for (const package_build& pb: builds)
{
- const build& b (*pb);
+ const build& b (*pb.build);
string ts (butl::to_string (b.timestamp,
"%Y-%m-%d %H:%M:%S %Z",
true /* special */,
true /* local */) +
- " (" + butl::to_string (now - b.timestamp, false) + " ago)");
+ " (" + butl::to_string (now - b.timestamp, false) + " ago");
+
+ if (pb.archived)
+ ts += ", archived";
+
+ ts += ')';
s << TABLE(CLASS="proplist build")
<< TBODY
- << TR_NAME (b.package_name, string (), root, b.tenant)
+ << TR_NAME (b.package_name, root, b.tenant)
<< TR_VERSION (b.package_name, b.package_version, root, b.tenant)
<< TR_VALUE ("toolchain",
b.toolchain_name + '-' +
b.toolchain_version.string ())
- << TR_VALUE ("config", b.configuration)
- << TR_VALUE ("machine", b.machine)
<< TR_VALUE ("target", b.target.string ())
- << TR_VALUE ("timestamp", ts)
- << TR_BUILD_RESULT (b, host, root);
+ << TR_VALUE ("tgt config", b.target_config_name)
+ << TR_VALUE ("pkg config", b.package_config_name)
+ << TR_VALUE ("timestamp", ts);
+
+ if (b.interactive) // Note: can only be present for the building state.
+ s << TR_VALUE ("login", *b.interactive);
+
+ s << TR_BUILD_RESULT (b, pb.archived, host, root);
// In the global view mode add the tenant builds link. Note that the
// global view (and the link) makes sense only in the multi-tenant mode.
//
if (!tn && !b.tenant.empty ())
- s << TR_TENANT (tenant_name, "builds", root, b.tenant);
+ s << TR_TENANT (tenant_name, "builds", root, b.tenant);
s << ~TBODY
<< ~TABLE;
@@ -698,47 +731,73 @@ handle (request& rq, response& rs)
else // Print unbuilt package configurations.
{
// Parameters to use for package build configurations queries. Note that
- // we cleanup the machine and the result filter arguments, as they are
- // irrelevant for unbuilt configurations.
+ // we cleanup the result filter argument, as it is irrelevant for unbuilt
+ // configurations.
//
params::builds bld_params (params);
- bld_params.machine ().clear ();
bld_params.result () = "*";
- // Query toolchains, filter build configurations and toolchains, and
- // create the set of configuration/toolchain combinations, that we will
- // print for packages. Also calculate the number of unbuilt package
- // configurations.
+ // Query toolchains, filter build target configurations and toolchains,
+ // and create the set of target configuration/toolchain combinations, that
+ // we will print for package configurations. Also calculate the number of
+ // unbuilt package configurations.
//
toolchains toolchains;
- // Note that config_toolchains contains shallow references to the
- // toolchain names and versions.
+ // Target configuration/toolchain combination.
+ //
+ // Note: all members are the shallow references.
+ //
+ struct target_config_toolchain
+ {
+ const butl::target_triplet& target;
+ const string& target_config;
+ const string& toolchain_name;
+ const bpkg::version& toolchain_version;
+ };
+
+ // Cache the build package objects that would otherwise be loaded twice:
+ // first time during calculating the builds count and then during printing
+ // the builds. Note that the build package is a subset of the package
+ // object and normally has a small memory footprint.
+ //
+ // @@ TMP It feels that we can try to combine the mentioned steps and
+ // improve the performance a bit. We won't need the session in this
+ // case.
+ //
+ session sn;
+
+ connection_ptr conn (build_db_->connection ());
+ transaction t (conn->begin ());
+
+ // Discourage PostgreSQL from using the nested loop join strategy in the
+ // current transaction (see above for details).
//
- set<config_toolchain> config_toolchains;
+ conn->execute ("SET LOCAL enable_nestloop=off");
+
+ vector<target_config_toolchain> config_toolchains;
{
- transaction t (build_db_->begin ());
toolchains = query_toolchains ();
- string tc_name;
- version tc_version;
- const string& tc (params.toolchain ());
+ string th_name;
+ version th_version;
+ const string& th (params.toolchain ());
- if (tc != "*")
+ if (th != "*")
try
{
- size_t p (tc.find ('-'));
+ size_t p (th.find ('-'));
if (p == string::npos) // Invalid format.
throw invalid_argument ("");
- tc_name.assign (tc, 0, p);
+ th_name.assign (th, 0, p);
// May throw invalid_argument.
//
// Note that an absent and zero revisions have the same semantics,
// so the zero revision is folded (see above for details).
//
- tc_version = version (string (tc, p + 1));
+ th_version = version (string (th, p + 1));
}
catch (const invalid_argument&)
{
@@ -748,63 +807,63 @@ handle (request& rq, response& rs)
throw invalid_request (400, "invalid toolchain");
}
- const string& pc (params.configuration ());
- const string& tg (params.target ());
- vector<const build_config*> configs;
+ vector<const build_target_config*> target_configs;
- for (const auto& c: *build_conf_)
+ for (const auto& c: *target_conf_)
{
- if ((pc.empty () || path_match (c.name, pc)) && // Filter by name.
+ // Filter by name.
+ //
+ if ((tgt_cfg.empty () || path_match (c.name, tgt_cfg)) &&
// Filter by target.
//
- (tg.empty () || path_match (c.target.string (), tg)) &&
+ (tgt.empty () || path_match (c.target.string (), tgt)) &&
- (!exclude_hidden || belongs (c, "all"))) // Filter hidden.
+ (!exclude_hidden || !belongs (c, "hidden"))) // Filter hidden.
{
- configs.push_back (&c);
+ target_configs.push_back (&c);
for (const auto& t: toolchains)
{
// Filter by toolchain.
//
- if (tc == "*" || (t.first == tc_name && t.second == tc_version))
- config_toolchains.insert ({c.name, t.first, t.second});
+ if (th == "*" || (t.first == th_name && t.second == th_version))
+ config_toolchains.push_back (
+ target_config_toolchain {c.target, c.name, t.first, t.second});
}
}
}
- // Calculate the number of unbuilt package configurations as a
- // difference between the maximum possible number of unbuilt
- // configurations and the number of existing package builds.
- //
- // Note that we also need to deduct the package-excluded configurations
- // count from the maximum possible number of unbuilt configurations. The
- // only way to achieve this is to traverse through the packages and
- // match their build expressions/constraints against our configurations.
- //
- // Also note that some existing builds can now be excluded by packages
- // due to the build configuration target or class set change. We should
- // deduct such builds count from the number of existing package builds.
- //
- size_t nmax (
- config_toolchains.size () *
- build_db_->query_value<buildable_package_count> (
- package_query<buildable_package_count> (
- params, tn, false /* archived */)));
-
- size_t ncur = build_db_->query_value<package_build_count> (
- build_query<package_build_count> (
- &conf_names, bld_params, tn, false /* archived */));
-
- // From now we will be using specific package name and version for each
- // build database query.
- //
- bld_params.name ().clear ();
- bld_params.version ().clear ();
-
if (!config_toolchains.empty ())
{
+ // Calculate the number of unbuilt package configurations as a
+ // difference between the possible number of unbuilt configurations
+ // and the number of existing package builds.
+ //
+ // Note that some existing builds can now be excluded by package
+ // configurations due to the build target configuration class set
+ // change. We should deduct such builds count from the number of
+ // existing package configurations builds.
+ //
+ // The only way to calculate both numbers is to traverse through the
+ // package configurations and match their build
+ // expressions/constraints against our target configurations.
+ //
+ size_t npos (0);
+
+ size_t ncur (build_db_->query_value<package_build_count> (
+ build_query<package_build_count> (&conf_ids, bld_params, tn)));
+
+ // From now we will be using specific values for the below filters for
+ // each build database query. Note that the toolchain is the only
+ // filter left in bld_params.
+ //
+ bld_params.name ().clear ();
+ bld_params.version ().clear ();
+ bld_params.target ().clear ();
+ bld_params.target_config ().clear ();
+ bld_params.package_config ().clear ();
+
// Prepare the build count prepared query.
//
// For each package-excluded configuration we will query the number of
@@ -814,59 +873,82 @@ handle (request& rq, response& rs)
using prep_bld_query = prepared_query<package_build_count>;
package_id id;
- string config;
+ target_triplet target;
+ string target_config_name;
+ string package_config_name;
const auto& bid (bld_query::build::id);
bld_query bq (
- package_id_eq<package_build_count> (bid.package, id) &&
- bid.configuration == bld_query::_ref (config) &&
+ equal<package_build_count> (bid.package, id) &&
+ bid.target == bld_query::_ref (target) &&
+ bid.target_config_name == bld_query::_ref (target_config_name) &&
+ bid.package_config_name == bld_query::_ref (package_config_name) &&
// Note that the query already constrains configurations via the
- // configuration name and the tenant via the build package id.
+ // configuration name and target.
//
- build_query<package_build_count> (nullptr /* configs */,
+ // Also note that while the query already constrains the tenant via
+ // the build package id, we still need to pass the tenant not to
+ // erroneously filter out the private tenants.
+ //
+ build_query<package_build_count> (nullptr /* config_ids */,
bld_params,
- nullopt /* tenant */,
- false /* archived */));
+ tn));
prep_bld_query bld_prep_query (
build_db_->prepare_query<package_build_count> (
"mod-builds-build-count-query", bq));
- size_t nt (tc == "*" ? toolchains.size () : 1);
+ // Number of possible builds per package configuration.
+ //
+ size_t nt (th == "*" ? toolchains.size () : 1);
// The number of packages can potentially be large, and we may
// implement some caching in the future. However, the caching will not
// be easy as the cached values depend on the filter form parameters.
//
query<buildable_package> q (
- package_query<buildable_package> (
- params, tn, false /* archived */));
+ package_query<buildable_package> (params, tn));
for (auto& bp: build_db_->query<buildable_package> (q))
{
- id = move (bp.id);
+ shared_ptr<build_package>& p (bp.package);
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
+ id = p->id;
- for (const auto& c: configs)
+ // Note: load the constrains section lazily.
+ //
+ for (const build_package_config& c: p->configs)
{
- if (exclude (p->builds, p->constraints, *c))
+ // Filter by package config name.
+ //
+ if (pkg_cfg.empty () || path_match (c.name, pkg_cfg))
{
- nmax -= nt;
-
- config = c->name;
- ncur -= bld_prep_query.execute_value ();
+ for (const auto& tc: target_configs)
+ {
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
+
+ if (exclude (c, p->builds, p->constraints, *tc))
+ {
+ target = tc->target;
+ target_config_name = tc->name;
+ package_config_name = c.name;
+ ncur -= bld_prep_query.execute_value ();
+ }
+ else
+ npos += nt;
+ }
}
}
}
- }
-
- assert (nmax >= ncur);
- count = nmax - ncur;
- t.commit ();
+ assert (npos >= ncur);
+ count = npos - ncur;
+ }
+ else
+ count = nullopt; // Unknown count.
}
// Print the filter form.
@@ -880,9 +962,11 @@ handle (request& rq, response& rs)
// 3: package tenant
// 4: toolchain name
// 5: toolchain version (descending)
- // 6: configuration name
+ // 6: target
+ // 7: target configuration name
+ // 8: package configuration name
//
- // Prepare the build package prepared query.
+ // Prepare the build package query.
//
// Note that we can't skip the proper number of packages in the database
// query for a page numbers greater than one. So we will query packages
@@ -897,28 +981,14 @@ handle (request& rq, response& rs)
// URL query parameter. Alternatively, we can invent the page number cap.
//
using pkg_query = query<buildable_package>;
- using prep_pkg_query = prepared_query<buildable_package>;
- pkg_query pq (
- package_query<buildable_package> (params, tn, false /* archived */));
-
- // Specify the portion. Note that we will still be querying packages in
- // chunks, not to hold locks for too long. For each package we will query
- // its builds, so let's keep the portion small.
- //
- size_t offset (0);
+ pkg_query pq (package_query<buildable_package> (params, tn));
pq += "ORDER BY" +
pkg_query::build_package::id.name +
order_by_version_desc (pkg_query::build_package::id.version,
false /* first */) + "," +
- pkg_query::build_package::id.tenant +
- "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50";
-
- connection_ptr conn (build_db_->connection ());
-
- prep_pkg_query pkg_prep_query (
- conn->prepare_query<buildable_package> ("mod-builds-package-query", pq));
+ pkg_query::build_package::id.tenant;
// Prepare the build prepared query.
//
@@ -932,14 +1002,13 @@ handle (request& rq, response& rs)
package_id id;
- bld_query bq (
- package_id_eq<package_build> (bld_query::build::id.package, id) &&
+ bld_query bq (equal<package_build> (bld_query::build::id.package, id) &&
- // Note that the query already constrains the tenant via the build
- // package id.
- //
- build_query<package_build> (
- &conf_names, bld_params, nullopt /* tenant */, false /* archived */));
+ // Note that while the query already constrains the tenant
+ // via the build package id, we still need to pass the
+ // tenant not to erroneously filter out the private tenants.
+ //
+ build_query<package_build> (&conf_ids, bld_params, tn));
prep_bld_query bld_prep_query (
conn->prepare_query<package_build> ("mod-builds-build-query", bq));
@@ -950,99 +1019,115 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- while (print != 0)
- {
- transaction t (conn->begin ());
- // Query (and cache) buildable packages.
- //
- auto packages (pkg_prep_query.execute ());
+ // Query (and cache) buildable packages.
+ //
+ auto packages (build_db_->query<buildable_package> (pq));
- if (packages.empty ())
- print = 0;
- else
+ if (packages.empty ())
+ print = 0;
+ else
+ {
+ // Iterate over packages and print unbuilt configurations. Skip the
+ // appropriate number of them first (for page number greater than one).
+ //
+ for (auto& bp: packages)
{
- offset += packages.size ();
+ shared_ptr<build_package>& p (bp.package);
+
+ id = p->id;
- // Iterate over packages and print unbuilt configurations. Skip the
- // appropriate number of them first (for page number greater than one).
+ // Copy configuration/toolchain combinations for this package,
+ // skipping excluded configurations.
//
- for (auto& p: packages)
- {
- id = move (p.id);
+ set<config_toolchain> unbuilt_configs;
- // Copy configuration/toolchain combinations for this package,
- // skipping excluded configurations.
+ // Load the constrains section lazily.
+ //
+ for (const build_package_config& pc: p->configs)
+ {
+ // Filter by package config name.
//
- set<config_toolchain> unbuilt_configs;
+ if (pkg_cfg.empty () || path_match (pc.name, pkg_cfg))
{
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
-
- for (const auto& ct: config_toolchains)
+ for (const target_config_toolchain& ct: config_toolchains)
{
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
- if (!exclude (p->builds, p->constraints, *i->second))
- unbuilt_configs.insert (ct);
+ auto i (
+ target_conf_map_->find (
+ build_target_config_id {ct.target, ct.target_config}));
+
+ assert (i != target_conf_map_->end ());
+
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
+
+ if (!exclude (pc, p->builds, p->constraints, *i->second))
+ unbuilt_configs.insert (
+ config_toolchain {ct.target,
+ ct.target_config,
+ pc.name,
+ ct.toolchain_name,
+ ct.toolchain_version});
}
}
+ }
- // Iterate through the package configuration builds and erase them
- // from the unbuilt configurations set.
- //
- for (const auto& pb: bld_prep_query.execute ())
- {
- const build& b (*pb.build);
+ // Iterate through the package configuration builds and erase them
+ // from the unbuilt configurations set.
+ //
+ for (const auto& pb: bld_prep_query.execute ())
+ {
+ const build& b (*pb.build);
- unbuilt_configs.erase ({
- b.id.configuration, b.toolchain_name, b.toolchain_version});
- }
+ unbuilt_configs.erase (config_toolchain {b.target,
+ b.target_config_name,
+ b.package_config_name,
+ b.toolchain_name,
+ b.toolchain_version});
+ }
- // Print unbuilt package configurations.
- //
- for (const auto& ct: unbuilt_configs)
+ // Print unbuilt package configurations.
+ //
+ for (const auto& ct: unbuilt_configs)
+ {
+ if (skip != 0)
{
- if (skip != 0)
- {
- --skip;
- continue;
- }
-
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
- s << TABLE(CLASS="proplist build")
- << TBODY
- << TR_NAME (id.name, string (), root, id.tenant)
- << TR_VERSION (id.name, p.version, root, id.tenant)
- << TR_VALUE ("toolchain",
- string (ct.toolchain_name) + '-' +
- ct.toolchain_version.string ())
- << TR_VALUE ("config", ct.configuration)
- << TR_VALUE ("target", i->second->target.string ());
-
- // In the global view mode add the tenant builds link. Note that
- // the global view (and the link) makes sense only in the
- // multi-tenant mode.
- //
- if (!tn && !id.tenant.empty ())
- s << TR_TENANT (tenant_name, "builds", root, id.tenant);
+ --skip;
+ continue;
+ }
- s << ~TBODY
- << ~TABLE;
+ s << TABLE(CLASS="proplist build")
+ << TBODY
+ << TR_NAME (id.name, root, id.tenant)
+ << TR_VERSION (id.name, p->version, root, id.tenant)
+ << TR_VALUE ("toolchain",
+ string (ct.toolchain_name) + '-' +
+ ct.toolchain_version.string ())
+ << TR_VALUE ("target", ct.target.string ())
+ << TR_VALUE ("tgt config", ct.target_config)
+ << TR_VALUE ("pkg config", ct.package_config);
+
+ // In the global view mode add the tenant builds link. Note that
+ // the global view (and the link) makes sense only in the
+ // multi-tenant mode.
+ //
+ if (!tn && !id.tenant.empty ())
+ s << TR_TENANT (tenant_name, "builds", root, id.tenant);
- if (--print == 0) // Bail out the configuration loop.
- break;
- }
+ s << ~TBODY
+ << ~TABLE;
- if (print == 0) // Bail out the package loop.
+ if (--print == 0) // Bail out the configuration loop.
break;
}
- }
- t.commit ();
+ if (print == 0) // Bail out the package loop.
+ break;
+ }
}
+
+ t.commit ();
+
s << ~DIV;
}
@@ -1068,13 +1153,17 @@ handle (request& rq, response& rs)
};
add_filter ("pv", params.version ());
- add_filter ("tc", params.toolchain (), "*");
- add_filter ("cf", params.configuration ());
- add_filter ("mn", params.machine ());
- add_filter ("tg", params.target ());
+ add_filter ("th", params.toolchain (), "*");
+ add_filter ("tg", tgt);
+ add_filter ("tc", tgt_cfg);
+ add_filter ("pc", pkg_cfg);
add_filter ("rs", params.result (), "*");
- s << DIV_PAGER (page, count, page_configs, options_->build_pages (), u)
+ s << DIV_PAGER (page,
+ count ? *count : 0,
+ page_configs,
+ options_->build_pages (),
+ u)
<< ~DIV
<< ~BODY
<< ~HTML;
diff --git a/mod/mod-builds.hxx b/mod/mod-builds.hxx
index 1447fab..0aa7916 100644
--- a/mod/mod-builds.hxx
+++ b/mod/mod-builds.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-builds.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_BUILDS_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 5a56526..5974d45 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -1,42 +1,50 @@
// file : mod/mod-ci.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-ci.hxx>
-#include <ostream>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
-#include <libbutl/uuid.hxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
-
-#include <libbpkg/manifest.hxx>
+#include <libbpkg/manifest.hxx> // package_manifest
#include <libbpkg/package-name.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
-#include <mod/external-handler.hxx>
+#include <mod/module-options.hxx>
using namespace std;
using namespace butl;
using namespace web;
using namespace brep::cli;
+#ifdef BREP_CI_TENANT_SERVICE
+brep::ci::
+ci (tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+#endif
+
brep::ci::
+#ifdef BREP_CI_TENANT_SERVICE
+ci (const ci& r, tenant_service_map& tsm)
+#else
ci (const ci& r)
+#endif
: handler (r),
+ ci_start (r),
options_ (r.initialized_ ? r.options_ : nullptr),
form_ (r.initialized_ || r.form_ == nullptr
? r.form_
: make_shared<xhtml::fragment> (*r.form_))
+#ifdef BREP_CI_TENANT_SERVICE
+ , tenant_service_map_ (tsm)
+#endif
{
}
@@ -45,22 +53,25 @@ init (scanner& s)
{
HANDLER_DIAG;
+#ifdef BREP_CI_TENANT_SERVICE
+ {
+ shared_ptr<tenant_service_base> ts (
+ dynamic_pointer_cast<tenant_service_base> (shared_from_this ()));
+
+ assert (ts != nullptr); // By definition.
+
+ tenant_service_map_["ci"] = move (ts);
+ }
+#endif
+
options_ = make_shared<options::ci> (
s, unknown_mode::fail, unknown_mode::fail);
- // Verify that the CI request handling is setup properly, if configured.
+ // Prepare for the CI requests handling, if configured.
//
if (options_->ci_data_specified ())
{
- // Verify the data directory satisfies the requirements.
- //
- const dir_path& d (options_->ci_data ());
-
- if (d.relative ())
- fail << "ci-data directory path must be absolute";
-
- if (!dir_exists (d))
- fail << "ci-data directory '" << d << "' does not exist";
+ ci_start::init (make_shared<options::ci_start> (*options_));
// Parse XHTML5 form file, if configured.
//
@@ -87,10 +98,6 @@ init (scanner& s)
fail << "unable to read ci-form file '" << ci_form << "': " << e;
}
}
-
- if (options_->ci_handler_specified () &&
- options_->ci_handler ().relative ())
- fail << "ci-handler path must be absolute";
}
if (options_->root ().empty ())
@@ -117,8 +124,8 @@ handle (request& rq, response& rs)
// latter case we will always respond with the same neutral message for
// security reason, logging the error details. Note that descriptions of
// exceptions caught by the web server are returned to the client (see
- // web/module.hxx for details), and we want to avoid this when there is a
- // danger of exposing sensitive data.
+ // web/server/module.hxx for details), and we want to avoid this when there
+ // is a danger of exposing sensitive data.
//
// Also we will pass through exceptions thrown by the underlying API, unless
// we need to handle them or add details for the description, in which case
@@ -130,9 +137,8 @@ handle (request& rq, response& rs)
//
// return respond_error (); // Request is handled with an error.
//
- string request_id; // Will be set later.
- auto respond_manifest = [&rs, &request_id] (status_code status,
- const string& message) -> bool
+ auto respond_manifest = [&rs] (status_code status,
+ const string& message) -> bool
{
serializer s (rs.content (status, "text/manifest;charset=utf-8"),
"response");
@@ -140,10 +146,6 @@ handle (request& rq, response& rs)
s.next ("", "1"); // Start of manifest.
s.next ("status", to_string (status));
s.next ("message", message);
-
- if (!request_id.empty ())
- s.next ("reference", request_id);
-
s.next ("", ""); // End of manifest.
return true;
};
@@ -234,9 +236,11 @@ handle (request& rq, response& rs)
if (rl.empty () || rl.local ())
return respond_manifest (400, "invalid repository location");
- // Verify the package name[/version] arguments.
+ // Parse the package name[/version] arguments.
//
- for (const string& s: params.package())
+ vector<package> packages;
+
+ for (const string& s: params.package ())
{
// Let's skip the potentially unfilled package form fields.
//
@@ -245,18 +249,21 @@ handle (request& rq, response& rs)
try
{
+ package pkg;
size_t p (s.find ('/'));
if (p != string::npos)
{
- package_name (string (s, 0, p));
+ pkg.name = package_name (string (s, 0, p));
// Not to confuse with module::version.
//
- bpkg::version (string (s, p + 1));
+ pkg.version = bpkg::version (string (s, p + 1));
}
else
- package_name p (s); // Not to confuse with the s variable declaration.
+ pkg.name = package_name (s);
+
+ packages.push_back (move (pkg));
}
catch (const invalid_argument&)
{
@@ -265,38 +272,49 @@ handle (request& rq, response& rs)
}
// Verify that unknown parameter values satisfy the requirements (contain
- // only ASCII printable characters plus '\r', '\n', and '\t').
+ // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n') and
+ // stash them.
//
// Actually, the expected ones must satisfy too, so check them as well.
//
- auto printable = [] (const string& s) -> bool
+ vector<pair<string, string>> custom_request;
{
- for (char c: s)
+ string what;
+ for (const name_value& nv: rps)
{
- if (!((c >= 0x20 && c <= 0x7E) || c == '\n' || c == '\r' || c == '\t'))
- return false;
+ if (nv.value &&
+ !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
+ return respond_manifest (400,
+ "invalid parameter " + nv.name + ": " + what);
+
+ const string& n (nv.name);
+
+ if (n != "repository" &&
+ n != "_" &&
+ n != "package" &&
+ n != "overrides" &&
+ n != "interactive" &&
+ n != "simulate")
+ custom_request.emplace_back (n, nv.value ? *nv.value : "");
}
- return true;
- };
-
- for (const name_value& nv: rps)
- {
- if (nv.value && !printable (*nv.value))
- return respond_manifest (400, "invalid parameter " + nv.name);
}
// Parse and validate overrides, if present.
//
- vector<manifest_name_value> overrides;
+ vector<pair<string, string>> overrides;
if (params.overrides_specified ())
try
{
istream& is (rq.open_upload ("overrides"));
parser mp (is, "overrides");
- overrides = parse_manifest (mp);
+ vector<manifest_name_value> ovrs (parse_manifest (mp));
+
+ package_manifest::validate_overrides (ovrs, mp.name ());
- package_manifest::validate_overrides (overrides, mp.name ());
+ overrides.reserve (ovrs.size ());
+ for (manifest_name_value& nv: ovrs)
+ overrides.emplace_back (move (nv.name), move (nv.value));
}
// Note that invalid_argument (thrown by open_upload() function call) can
// mean both no overrides upload or multiple overrides uploads.
@@ -317,381 +335,141 @@ handle (request& rq, response& rs)
return respond_error ();
}
- try
- {
- // Note that from now on the result manifest we respond with will contain
- // the reference value.
- //
- request_id = uuid::generate ().string ();
- }
- catch (const system_error& e)
- {
- error << "unable to generate request id: " << e;
- return respond_error ();
- }
-
- // Create the submission data directory.
+ // Stash the User-Agent HTTP header and the client IP address.
//
- dir_path dd (options_->ci_data () / dir_path (request_id));
-
- try
- {
- // It's highly unlikely but still possible that the directory already
- // exists. This can only happen if the generated uuid is not unique.
- //
- if (try_mkdir (dd) == mkdir_status::already_exists)
- throw_generic_error (EEXIST);
- }
- catch (const system_error& e)
+ optional<string> client_ip;
+ optional<string> user_agent;
+ for (const name_value& h: rq.headers ())
{
- error << "unable to create directory '" << dd << "': " << e;
- return respond_error ();
+ if (icasecmp (h.name, ":Client-IP") == 0)
+ client_ip = h.value;
+ else if (icasecmp (h.name, "User-Agent") == 0)
+ user_agent = h.value;
}
- auto_rmdir ddr (dd);
-
- // Serialize the CI request manifest to a stream. On the serialization error
- // respond to the client with the manifest containing the bad request (400)
- // code and return false, on the stream error pass through the io_error
- // exception, otherwise return true.
- //
- timestamp ts (system_clock::now ());
-
- auto rqm = [&request_id,
- &rl,
- &ts,
- &simulate,
- &rq,
- &rps,
- &params,
- &respond_manifest]
- (ostream& os, bool long_lines = false) -> bool
- {
- try
- {
- serializer s (os, "request", long_lines);
-
- // Serialize the submission manifest header.
- //
- s.next ("", "1"); // Start of manifest.
- s.next ("id", request_id);
- s.next ("repository", rl.string ());
-
- for (const string& p: params.package ())
- {
- if (!p.empty ()) // Skip empty package names (see above for details).
- s.next ("package", p);
- }
-
- s.next ("timestamp",
- butl::to_string (ts,
- "%Y-%m-%dT%H:%M:%SZ",
- false /* special */,
- false /* local */));
-
- if (!simulate.empty ())
- s.next ("simulate", simulate);
-
- // Serialize the User-Agent HTTP header and the client IP address.
- //
- optional<string> ip;
- optional<string> ua;
- for (const name_value& h: rq.headers ())
- {
- if (icasecmp (h.name, ":Client-IP") == 0)
- ip = h.value;
- else if (icasecmp (h.name, "User-Agent") == 0)
- ua = h.value;
- }
-
- if (ip)
- s.next ("client-ip", *ip);
-
- if (ua)
- s.next ("user-agent", *ua);
-
- // Serialize the request parameters.
- //
- // Note that the serializer constraints the parameter names (can't start
- // with '#', can't contain ':' and the whitespaces, etc.).
- //
- for (const name_value& nv: rps)
- {
- const string& n (nv.name);
-
- if (n != "repository" &&
- n != "_" &&
- n != "package" &&
- n != "overrides" &&
- n != "simulate")
- s.next (n, nv.value ? *nv.value : "");
- }
-
- s.next ("", ""); // End of manifest.
- return true;
- }
- catch (const serialization& e)
- {
- respond_manifest (400, string ("invalid parameter: ") + e.what ());
- return false;
- }
- };
-
- // Serialize the CI request manifest to the submission directory.
- //
- path rqf (dd / "request.manifest");
+ optional<start_result> r (start (error,
+ warn,
+ verb_ ? &trace : nullptr,
+#ifdef BREP_CI_TENANT_SERVICE
+ tenant_service ("", "ci"),
+#else
+ nullopt /* service */,
+#endif
+ rl,
+ packages,
+ client_ip,
+ user_agent,
+ (params.interactive_specified ()
+ ? params.interactive ()
+ : optional<string> ()),
+ (!simulate.empty ()
+ ? simulate
+ : optional<string> ()),
+ custom_request,
+ overrides));
+
+ if (!r)
+ return respond_error (); // The diagnostics is already issued.
try
{
- ofdstream os (rqf);
- bool r (rqm (os));
- os.close ();
-
- if (!r)
- return true; // The client is already responded with the manifest.
- }
- catch (const io_error& e)
- {
- error << "unable to write to '" << rqf << "': " << e;
- return respond_error ();
+ serialize_manifest (*r,
+ rs.content (r->status, "text/manifest;charset=utf-8"));
}
-
- // Serialize the CI overrides manifest to a stream. On the stream error pass
- // through the io_error exception.
- //
- // Note that it can't throw the serialization exception as the override
- // manifest is parsed from the stream and so verified.
- //
- auto ovm = [&overrides] (ostream& os, bool long_lines = false)
+ catch (const serialization& e)
{
- try
- {
- serializer s (os, "overrides", long_lines);
- serialize_manifest (s, overrides);
- }
- catch (const serialization&) {assert (false);} // See above.
- };
-
- // Serialize the CI overrides manifest to the submission directory.
- //
- path ovf (dd / "overrides.manifest");
+ error << "ref " << r->reference << ": unable to serialize handler's "
+ << "output: " << e;
- if (!overrides.empty ())
- try
- {
- ofdstream os (ovf);
- ovm (os);
- os.close ();
- }
- catch (const io_error& e)
- {
- error << "unable to write to '" << ovf << "': " << e;
return respond_error ();
}
- // Given that the submission data is now successfully persisted we are no
- // longer in charge of removing it, except for the cases when the submission
- // handler terminates with an error (see below for details).
- //
- ddr.cancel ();
-
- // If the handler terminates with non-zero exit status or specifies 5XX
- // (HTTP server error) submission result manifest status value, then we
- // stash the submission data directory for troubleshooting. Otherwise, if
- // it's the 4XX (HTTP client error) status value, then we remove the
- // directory.
- //
- // Note that leaving the directory in place in case of a submission error
- // would have prevent the user from re-submitting until we research the
- // issue and manually remove the directory.
- //
- auto stash_submit_dir = [&dd, error] ()
- {
- if (dir_exists (dd))
- try
- {
- mvdir (dd, dir_path (dd + ".fail"));
- }
- catch (const system_error& e)
- {
- // Not much we can do here. Let's just log the issue and bail out
- // leaving the directory in place.
- //
- error << "unable to rename directory '" << dd << "': " << e;
- }
- };
-
- // Run the submission handler, if specified, reading the result manifest
- // from its stdout and caching it as a name/value pair list for later use
- // (forwarding to the client, sending via email, etc.). Otherwise, create
- // implied result manifest.
- //
- status_code sc;
- vector<manifest_name_value> rvs;
-
- if (options_->ci_handler_specified ())
- {
- using namespace external_handler;
-
- optional<result_manifest> r (run (options_->ci_handler (),
- options_->ci_handler_argument (),
- dd,
- options_->ci_handler_timeout (),
- error,
- warn,
- verb_ ? &trace : nullptr));
- if (!r)
- {
- stash_submit_dir ();
- return respond_error (); // The diagnostics is already issued.
- }
-
- sc = r->status;
- rvs = move (r->values);
- }
- else // Create the implied result manifest.
- {
- sc = 200;
-
- auto add = [&rvs] (string n, string v)
- {
- manifest_name_value nv {
- move (n), move (v),
- 0 /* name_line */, 0 /* name_column */,
- 0 /* value_line */, 0 /* value_column */,
- 0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */};
-
- rvs.emplace_back (move (nv));
- };
-
- add ("status", "200");
- add ("message", "CI request is queued");
- add ("reference", request_id);
- }
-
- assert (!rvs.empty ()); // Produced by the handler or is implied.
-
- // Serialize the submission result manifest to a stream. On the
- // serialization error log the error description and return false, on the
- // stream error pass through the io_error exception, otherwise return true.
- //
- auto rsm = [&rvs, &error, &request_id] (ostream& os,
- bool long_lines = false) -> bool
- {
- try
- {
- serializer s (os, "result", long_lines);
- serialize_manifest (s, rvs);
- return true;
- }
- catch (const serialization& e)
- {
- error << "ref " << request_id << ": unable to serialize handler's "
- << "output: " << e;
- return false;
- }
- };
-
- // If the submission data directory still exists then perform an appropriate
- // action on it, depending on the submission result status. Note that the
- // handler could move or remove the directory.
- //
- if (dir_exists (dd))
- {
- // Remove the directory if the client error is detected.
- //
- if (sc >= 400 && sc < 500)
- rmdir_r (dd);
-
- // Otherwise, save the result manifest, into the directory. Also stash the
- // directory for troubleshooting in case of the server error.
- //
- else
- {
- path rsf (dd / "result.manifest");
-
- try
- {
- ofdstream os (rsf);
-
- // Not being able to stash the result manifest is not a reason to
- // claim the submission failed. The error is logged nevertheless.
- //
- rsm (os);
-
- os.close ();
- }
- catch (const io_error& e)
- {
- // Not fatal (see above).
- //
- error << "unable to write to '" << rsf << "': " << e;
- }
-
- if (sc >= 500 && sc < 600)
- stash_submit_dir ();
- }
- }
-
- // Send email, if configured, and the CI request submission is not simulated.
- // Use the long lines manifest serialization mode for the convenience of
- // copying/clicking URLs they contain.
- //
- // Note that we don't consider the email sending failure to be a submission
- // failure as the submission data is successfully persisted and the handler
- // is successfully executed, if configured. One can argue that email can be
- // essential for the submission processing and missing it would result in
- // the incomplete submission. In this case it's natural to assume that the
- // web server error log is monitored and the email sending failure will be
- // noticed.
- //
- if (options_->ci_email_specified () && simulate.empty ())
- try
- {
- // Redirect the diagnostics to the web server error log.
- //
- sendmail sm ([&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- },
- 2 /* stderr */,
- options_->email (),
- "CI request submission (" + request_id + ")",
- {options_->ci_email ()});
-
- // Write the CI request manifest.
- //
- bool r (rqm (sm.out, true /* long_lines */));
- assert (r); // The serialization succeeded once, so can't fail now.
-
- // Write the CI overrides manifest.
- //
- sm.out << "\n\n";
-
- ovm (sm.out, true /* long_lines */);
-
- // Write the CI result manifest.
- //
- sm.out << "\n\n";
-
- // We don't care about the result (see above).
- //
- rsm (sm.out, true /* long_lines */);
-
- sm.out.close ();
+ return true;
+}
- if (!sm.wait ())
- error << "sendmail " << *sm.exit;
- }
- // Handle process_error and io_error (both derive from system_error).
- //
- catch (const system_error& e)
- {
- error << "sendmail error: " << e;
- }
+#ifdef BREP_CI_TENANT_SERVICE
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_queued (const tenant_service&,
+ const vector<build>& bs,
+ optional<build_state> initial_state,
+ const build_queued_hints& hints,
+ const diag_epilogue& log_writer) const noexcept
+{
+ NOTIFICATION_DIAG (log_writer);
+
+ l2 ([&]{trace << "initial_state: "
+ << (initial_state ? to_string (*initial_state) : "none")
+ << ", hints "
+ << hints.single_package_version << ' '
+ << hints.single_package_config;});
+
+ return [&bs, initial_state] (const tenant_service& ts)
+ {
+ optional<string> r (ts.data);
+
+ for (const build& b: bs)
+ {
+ string s ((!initial_state
+ ? "queued "
+ : "queued " + to_string (*initial_state) + ' ') +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ if (r)
+ {
+ *r += ", ";
+ *r += s;
+ }
+ else
+ r = move (s);
+ }
+
+ return r;
+ };
+}
- if (!rsm (rs.content (sc, "text/manifest;charset=utf-8")))
- return respond_error (); // The error description is already logged.
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_building (const tenant_service&,
+ const build& b,
+ const diag_epilogue&) const noexcept
+{
+ return [&b] (const tenant_service& ts)
+ {
+ string s ("building " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ return ts.data ? *ts.data + ", " + s : s;
+ };
+}
- return true;
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_built (const tenant_service&,
+ const build& b,
+ const diag_epilogue&) const noexcept
+{
+ return [&b] (const tenant_service& ts)
+ {
+ string s ("built " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ return ts.data ? *ts.data + ", " + s : s;
+ };
}
+#endif
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index 8a4e51e..1e2ee15 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -1,23 +1,47 @@
// file : mod/mod-ci.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_CI_HXX
#define MOD_MOD_CI_HXX
-#include <web/xhtml-fragment.hxx>
+#include <web/xhtml/fragment.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
+#include <libbrep/build.hxx>
+#include <libbrep/common.hxx> // tenant_service
+
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+
+#include <mod/ci-common.hxx>
+
+#ifdef BREP_CI_TENANT_SERVICE
+# include <mod/tenant-service.hxx>
+#endif
namespace brep
{
- class ci: public handler
+ class ci: public handler,
+ private ci_start
+#ifdef BREP_CI_TENANT_SERVICE
+ , public tenant_service_build_queued,
+ public tenant_service_build_building,
+ public tenant_service_build_built
+#endif
{
public:
+
+#ifdef BREP_CI_TENANT_SERVICE
+ explicit
+ ci (tenant_service_map&);
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ ci (const ci&, tenant_service_map&);
+#else
ci () = default;
// Create a shallow copy (handling instance) if initialized and a deep
@@ -25,20 +49,44 @@ namespace brep
//
explicit
ci (const ci&);
+#endif
virtual bool
- handle (request&, response&);
+ handle (request&, response&) override;
virtual const cli::options&
- cli_options () const {return options::ci::description ();}
+ cli_options () const override {return options::ci::description ();}
+
+#ifdef BREP_CI_TENANT_SERVICE
+ virtual function<optional<string> (const tenant_service&)>
+ build_queued (const tenant_service&,
+ const vector<build>&,
+ optional<build_state> initial_state,
+ const build_queued_hints&,
+ const diag_epilogue& log_writer) const noexcept override;
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_building (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept override;
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_built (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept override;
+#endif
private:
virtual void
- init (cli::scanner&);
+ init (cli::scanner&) override;
private:
shared_ptr<options::ci> options_;
shared_ptr<web::xhtml::fragment> form_;
+
+#ifdef BREP_CI_TENANT_SERVICE
+ tenant_service_map& tenant_service_map_;
+#endif
};
}
diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx
index fc2e6be..fcd50da 100644
--- a/mod/mod-package-details.cxx
+++ b/mod/mod-package-details.cxx
@@ -1,5 +1,4 @@
// file : mod/mod-package-details.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-package-details.hxx>
@@ -10,15 +9,16 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace odb::core;
using namespace brep::cli;
@@ -183,20 +183,20 @@ handle (request& rq, response& rs)
//
s << H2 << pkg->summary << ~H2;
- if (const optional<string>& d = pkg->description)
+ if (const optional<typed_text>& d = pkg->package_description
+ ? pkg->package_description
+ : pkg->description)
{
const string id ("description");
const string what (name.string () + " description");
s << (full
? DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
id,
what,
error)
: DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
options_->package_description (),
url (!full, squery, page, id),
@@ -227,7 +227,7 @@ handle (request& rq, response& rs)
<< ~TABLE;
}
- auto pkg_count (
+ size_t pkg_count (
package_db_->query_value<package_count> (
search_params<package_count> (squery, tenant, name)));
@@ -265,23 +265,12 @@ handle (request& rq, response& rs)
assert (p->internal ());
- // @@ Shouldn't we make package repository name to be a link to the proper
- // place of the About page, describing corresponding repository?
- // Yes, I think that's sounds reasonable.
- // Or maybe it can be something more valuable like a link to the
- // repository package search page ?
- //
- // @@ In most cases package location will be the same for all versions
- // of the same package. Shouldn't we put package location to the
- // package summary part and display it here only if it differs
- // from the one in the summary ?
- //
- // Hm, I am not so sure about this. Consider: stable/testing/unstable.
+ const repository_location& rl (p->internal_repository.load ()->location);
+
+ // @@ Maybe the repository link can be something more valuable like a link
+ // to the repository package search page ?
//
- s << TR_REPOSITORY (
- p->internal_repository.object_id ().canonical_name,
- root,
- tenant)
+ s << TR_REPOSITORY (rl, root, tenant)
<< TR_DEPENDS (p->dependencies, root, tenant)
<< TR_REQUIRES (p->requirements)
<< ~TBODY
diff --git a/mod/mod-package-details.hxx b/mod/mod-package-details.hxx
index 3e2a015..e1b0a9c 100644
--- a/mod/mod-package-details.hxx
+++ b/mod/mod-package-details.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-package-details.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_PACKAGE_DETAILS_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
namespace brep
diff --git a/mod/mod-package-version-details.cxx b/mod/mod-package-version-details.cxx
index 8787860..91923e5 100644
--- a/mod/mod-package-version-details.cxx
+++ b/mod/mod-package-version-details.cxx
@@ -1,5 +1,4 @@
// file : mod/mod-package-version-details.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-package-version-details.hxx>
@@ -10,9 +9,12 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry
+
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
@@ -20,7 +22,7 @@
#include <libbrep/package-odb.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
using namespace butl;
@@ -47,6 +49,12 @@ init (scanner& s)
options_ = make_shared<options::package_version_details> (
s, unknown_mode::fail, unknown_mode::fail);
+ // Verify that the bindist-url option is specified when necessary.
+ //
+ if (options_->bindist_root_specified () &&
+ !options_->bindist_url_specified ())
+ fail << "bindist-url must be specified if bindist-root is specified";
+
database_module::init (static_cast<const options::package_db&> (*options_),
options_->package_db_retry ());
@@ -152,7 +160,7 @@ handle (request& rq, response& rs)
const string& name (pkg->name.string ());
- const string title (name + " " + sver);
+ const string title (name + ' ' + sver);
xml::serializer s (rs.content (), title);
s << HTML
@@ -181,20 +189,20 @@ handle (request& rq, response& rs)
s << H2 << pkg->summary << ~H2;
- if (const optional<string>& d = pkg->description)
+ if (const optional<typed_text>& d = pkg->package_description
+ ? pkg->package_description
+ : pkg->description)
{
const string id ("description");
const string what (title + " description");
s << (full
- ? DIV_TEXT (*d, *
- pkg->description_type,
+ ? DIV_TEXT (*d,
true /* strip_title */,
id,
what,
error)
: DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
options_->package_description (),
url (!full, id),
@@ -214,14 +222,13 @@ handle (request& rq, response& rs)
<< TR_PRIORITY (pkg->priority)
<< TR_LICENSES (pkg->license_alternatives)
- << TR_REPOSITORY (rl.canonical_name (), root, tenant)
- << TR_LOCATION (rl);
+ << TR_REPOSITORY (rl, root, tenant);
if (rl.type () == repository_type::pkg)
{
assert (pkg->location);
- s << TR_LINK (rl.url ().string () + "/" + pkg->location->string (),
+ s << TR_LINK (rl.url ().string () + '/' + pkg->location->string (),
pkg->location->leaf ().string (),
"download");
}
@@ -293,7 +300,7 @@ handle (request& rq, response& rs)
if (dcon)
s << ' '
- << A(HREF=u + "/" + p->version.string ()) << *dcon << ~A;
+ << A(HREF=u + '/' + p->version.string ()) << *dcon << ~A;
}
else if (p->internal ())
{
@@ -321,31 +328,51 @@ handle (request& rq, response& rs)
<< TABLE(CLASS="proplist", ID="depends")
<< TBODY;
- for (const auto& da: ds)
+ for (const auto& das: ds)
{
s << TR(CLASS="depends")
<< TH;
- if (da.conditional)
- s << "?";
-
- if (da.buildtime)
- s << "*";
+ if (das.buildtime)
+ s << '*';
s << ~TH
<< TD
<< SPAN(CLASS="value");
- for (const auto& d: da)
+ for (const auto& da: das)
{
- if (&d != &da[0])
+ if (&da != &das[0])
s << " | ";
- print_dependency (d);
+ // Should we enclose multiple dependencies into curly braces as in the
+ // manifest? Somehow feels redundant here, since there can't be any
+ // ambiguity (dependency group version constraint is already punched
+ // into the specific dependencies without constraints).
+ //
+ for (const dependency& d: da)
+ {
+ if (&d != &da[0])
+ s << ' ';
+
+ print_dependency (d);
+ }
+
+ if (da.enable)
+ {
+ s << " ? (";
+
+ if (full)
+ s << *da.enable;
+ else
+ s << "...";
+
+ s << ')';
+ }
}
s << ~SPAN
- << SPAN_COMMENT (da.comment)
+ << SPAN_COMMENT (das.comment)
<< ~TD
<< ~TR;
}
@@ -361,34 +388,59 @@ handle (request& rq, response& rs)
<< TABLE(CLASS="proplist", ID="requires")
<< TBODY;
- for (const auto& ra: rm)
+ for (const requirement_alternatives& ras: rm)
{
s << TR(CLASS="requires")
<< TH;
- if (ra.conditional)
- s << "?";
-
- if (ra.buildtime)
- s << "*";
-
- if (ra.conditional || ra.buildtime)
- s << " ";
+ if (ras.buildtime)
+ s << '*';
s << ~TH
<< TD
<< SPAN(CLASS="value");
- for (const auto& r: ra)
+ for (const requirement_alternative& ra: ras)
{
- if (&r != &ra[0])
+ if (&ra != &ras[0])
s << " | ";
- s << r;
+ // Should we enclose multiple requirement ids into curly braces as in
+ // the manifest? Somehow feels redundant here, since there can't be
+ // any ambiguity (requirement group version constraint is already
+ // punched into the specific requirements without constraints).
+ //
+ for (const string& r: ra)
+ {
+ if (&r != &ra[0])
+ s << ' ';
+
+ s << r;
+ }
+
+ if (ra.enable)
+ {
+ if (!ra.simple () || !ra[0].empty ())
+ s << ' ';
+
+ s << '?';
+
+ if (!ra.enable->empty ())
+ {
+ s << " (";
+
+ if (full)
+ s << *ra.enable;
+ else
+ s << "...";
+
+ s << ')';
+ }
+ }
}
s << ~SPAN
- << SPAN_COMMENT (ra.comment)
+ << SPAN_COMMENT (ras.comment)
<< ~TD
<< ~TR;
}
@@ -397,38 +449,84 @@ handle (request& rq, response& rs)
<< ~TABLE;
}
- auto print_dependencies = [&s, &print_dependency]
- (const small_vector<dependency, 1>& deps,
- const char* heading,
- const char* id)
+ // Print the test dependencies grouped by types as the separate blocks.
+ //
+ // Print test dependencies of the specific type.
+ //
+ auto print_tests = [&pkg,
+ &s,
+ &print_dependency,
+ full] (test_dependency_type dt)
{
- if (!deps.empty ())
- {
- s << H3 << heading << ~H3
- << TABLE(CLASS="proplist", ID=id)
- << TBODY;
+ string id;
- for (const dependency& d: deps)
+ bool first (true);
+ for (const test_dependency& td: pkg->tests)
+ {
+ if (td.type == dt)
{
+ // Print the table header if this is a first test dependency.
+ //
+ if (first)
+ {
+ id = to_string (dt);
+
+ // Capitalize the heading.
+ //
+ string heading (id);
+ heading[0] = ucase (id[0]);
+
+ s << H3 << heading << ~H3
+ << TABLE(CLASS="proplist", ID=id)
+ << TBODY;
+
+ first = false;
+ }
+
s << TR(CLASS=id)
+ << TH;
+
+ if (td.buildtime)
+ s << '*';
+
+ s << ~TH
<< TD
<< SPAN(CLASS="value");
- print_dependency (d);
+ print_dependency (td);
+
+ if (td.enable || td.reflect)
+ {
+ if (full)
+ {
+ if (td.enable)
+ s << " ? (" << *td.enable << ')';
+
+ if (td.reflect)
+ s << ' ' << *td.reflect;
+ }
+ else
+ s << " ...";
+ }
s << ~SPAN
<< ~TD
<< ~TR;
}
+ }
+ // Print the table closing tags if it was printed.
+ //
+ if (!first)
+ {
s << ~TBODY
<< ~TABLE;
}
};
- print_dependencies (pkg->tests, "Tests", "tests");
- print_dependencies (pkg->examples, "Examples", "examples");
- print_dependencies (pkg->benchmarks, "Benchmarks", "benchmarks");
+ print_tests (test_dependency_type::tests);
+ print_tests (test_dependency_type::examples);
+ print_tests (test_dependency_type::benchmarks);
bool builds (build_db_ != nullptr && pkg->buildable);
@@ -436,34 +534,203 @@ handle (request& rq, response& rs)
{
package_db_->load (*pkg, pkg->build_section);
- // If the package has a singe build configuration class expression with
- // exactly one underlying class and the class is none, then we just drop
- // the page builds section altogether.
+ // If all package build configurations has a singe effective build
+ // configuration class expression with exactly one underlying class and
+ // the class is none, then we just drop the page builds section
+ // altogether.
//
- if (pkg->builds.size () == 1)
+ builds = false;
+
+ for (const package_build_config& pc: pkg->build_configs)
{
- const build_class_expr& be (pkg->builds[0]);
+ const build_class_exprs& exprs (pc.effective_builds (pkg->builds));
- builds = be.underlying_classes.size () != 1 ||
- be.underlying_classes[0] != "none";
+ if (exprs.size () != 1 ||
+ exprs[0].underlying_classes.size () != 1 ||
+ exprs[0].underlying_classes[0] != "none")
+ {
+ builds = true;
+ break;
+ }
}
}
- bool archived (package_db_->load<brep::tenant> (tenant)->archived);
+ shared_ptr<brep::tenant> tn (package_db_->load<brep::tenant> (tenant));
t.commit ();
- if (builds)
+ // Display the binary distribution packages for this tenant, package, and
+ // version, if present. Print the archive distributions last.
+ //
+ if (options_->bindist_root_specified ())
{
- using bbot::build_config;
+ // Collect all the available package configurations by iterating over the
+ // <distribution> and <os-release> subdirectories and the <package-config>
+ // symlinks in the following filesystem hierarchy:
+ //
+ // [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+ //
+ // Note that it is possible that new directories and symlinks are created
+ // and/or removed while we iterate over the filesystem entries in the
+ // above hierarchy, which may result with system_error exceptions. If that
+ // happens, we just ignore such exceptions, trying to collect what we can.
+ //
+ const dir_path& br (options_->bindist_root ());
+
+ dir_path d (br);
+
+ if (!tenant.empty ())
+ d /= tenant;
+
+ // Note that distribution and os_release are simple paths and the
+ // config_symlink and config_dir are relative to the bindist root
+ // directory.
+ //
+ struct bindist_config
+ {
+ dir_path distribution; // debian, fedora, archive
+ dir_path os_release; // fedora37, windows10
+ path symlink; // .../x86_64, .../x86_64-release
+ dir_path directory; // .../x86_64-2023-05-11T10:13:43Z
+
+ bool
+ operator< (const bindist_config& v)
+ {
+ if (int r = distribution.compare (v.distribution))
+ return distribution.string () == "archive" ? false :
+ v.distribution.string () == "archive" ? true :
+ r < 0;
+
+ if (int r = os_release.compare (v.os_release))
+ return r < 0;
+
+ return symlink < v.symlink;
+ }
+ };
+
+ vector<bindist_config> configs;
+
+ if (dir_exists (d))
+ try
+ {
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::ignore_dangling))
+ {
+ if (de.type () != entry_type::directory)
+ continue;
+ // Distribution directory.
+ //
+ dir_path dd (path_cast<dir_path> (de.path ()));
+
+ try
+ {
+ dir_path fdd (d / dd);
+
+ for (const dir_entry& re:
+ dir_iterator (fdd, dir_iterator::ignore_dangling))
+ {
+ if (re.type () != entry_type::directory)
+ continue;
+
+ // OS release directory.
+ //
+ dir_path rd (path_cast<dir_path> (re.path ()));
+
+ // Package version directory.
+ //
+ dir_path vd (fdd /
+ rd /
+ dir_path (pkg->project.string ()) /
+ dir_path (pn.string ()) /
+ dir_path (sver));
+
+ try
+ {
+ for (const dir_entry& ce:
+ dir_iterator (vd, dir_iterator::ignore_dangling))
+ {
+ if (ce.ltype () != entry_type::symlink)
+ continue;
+
+ // Skip the "hidden" symlinks which may potentially be used by
+ // the upload handlers until they expose the finalized upload
+ // directory.
+ //
+ const path& cl (ce.path ());
+ if (cl.string () [0] == '.')
+ continue;
+
+ try
+ {
+ path fcl (vd / cl);
+ dir_path cd (path_cast<dir_path> (followsymlink (fcl)));
+
+ if (cd.sub (br))
+ configs.push_back (
+ bindist_config {dd, rd, fcl.leaf (br), cd.leaf (br)});
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+
+ // Sort and print collected package configurations, if any.
+ //
+ if (!configs.empty ())
+ {
+ sort (configs.begin (), configs.end ());
+
+ s << H3 << "Binaries" << ~H3
+ << TABLE(ID="binaries")
+ << TBODY;
+
+ for (const bindist_config& c: configs)
+ {
+ s << TR(CLASS="binaries")
+ << TD << SPAN(CLASS="value") << c.distribution << ~SPAN << ~TD
+ << TD << SPAN(CLASS="value") << c.os_release << ~SPAN << ~TD
+ << TD
+ << SPAN(CLASS="value")
+ << A
+ << HREF
+ << options_->bindist_url () << '/' << c.symlink
+ << ~HREF
+ << c.symlink.leaf ()
+ << ~A
+ << " ("
+ << A
+ << HREF
+ << options_->bindist_url () << '/' << c.directory
+ << ~HREF
+ << "snapshot"
+ << ~A
+ << ")"
+ << ~SPAN
+ << ~TD
+ << ~TR;
+ }
+
+ s << ~TBODY
+ << ~TABLE;
+ }
+ }
+
+ if (builds)
+ {
s << H3 << "Builds" << ~H3
<< DIV(ID="builds");
- auto exclude = [&pkg, this] (const build_config& cfg,
- string* reason = nullptr)
+ auto exclude = [&pkg, this] (const package_build_config& pc,
+ const build_target_config& tc,
+ string* rs = nullptr)
{
- return this->exclude (pkg->builds, pkg->build_constraints, cfg, reason);
+ return this->exclude (pc, pkg->builds, pkg->build_constraints, tc, rs);
};
timestamp now (system_clock::now ());
@@ -475,13 +742,7 @@ handle (request& rq, response& rs)
// Query toolchains seen for the package tenant to produce a list of the
// unbuilt configuration/toolchain combinations.
//
- // Note that it only make sense to print those unbuilt configurations that
- // may still be built. That's why we leave the toolchains list empty if
- // the package tenant is achieved.
- //
vector<pair<string, version>> toolchains;
-
- if (!archived)
{
using query = query<toolchain>;
@@ -492,49 +753,73 @@ handle (request& rq, response& rs)
"ORDER BY" + query::build::id.toolchain_name +
order_by_version_desc (query::build::id.toolchain_version,
false /* first */)))
+ {
toolchains.emplace_back (move (t.name), move (t.version));
+ }
}
- // Collect configuration names and unbuilt configurations, skipping those
- // that are hidden or excluded by the package.
+ // Compose the configuration filtering sub-query and collect unbuilt
+ // target configurations, skipping those that are hidden or excluded by
+ // the package configurations.
//
- cstrings conf_names;
+ using query = query<build>;
+
+ query sq (false);
set<config_toolchain> unbuilt_configs;
- for (const auto& c: *build_conf_map_)
+ for (const package_build_config& pc: pkg->build_configs)
{
- const build_config& cfg (*c.second);
-
- if (belongs (cfg, "all") && !exclude (cfg))
+ for (const auto& bc: *target_conf_map_)
{
- conf_names.push_back (c.first);
-
- // Note: we will erase built configurations from the unbuilt
- // configurations set later (see below).
- //
- for (const auto& t: toolchains)
- unbuilt_configs.insert ({cfg.name, t.first, t.second});
+ const build_target_config& tc (*bc.second);
+
+ if (!belongs (tc, "hidden") && !exclude (pc, tc))
+ {
+ const build_target_config_id& id (bc.first);
+
+ sq = sq || (query::id.target == id.target &&
+ query::id.target_config_name == id.config &&
+ query::id.package_config_name == pc.name);
+
+ // Note: we will erase built configurations from the unbuilt
+ // configurations set later (see below).
+ //
+ for (const auto& t: toolchains)
+ unbuilt_configs.insert (config_toolchain {tc.target,
+ tc.name,
+ pc.name,
+ t.first,
+ t.second});
+ }
}
}
- // Print the package built configurations in the time-descending order.
+ // Let's not print the package configuration row if the default
+ // configuration is the only one.
//
- using query = query<build>;
+ bool ppc (pkg->build_configs.size () != 1); // Note: can't be empty.
+ // Print the package built configurations in the time-descending order.
+ //
for (auto& b: build_db_->query<build> (
- (query::id.package == pkg->id &&
-
- query::id.configuration.in_range (conf_names.begin (),
- conf_names.end ())) +
-
+ (query::id.package == pkg->id && query::state != "queued" && sq) +
"ORDER BY" + query::timestamp + "DESC"))
{
string ts (butl::to_string (b.timestamp,
"%Y-%m-%d %H:%M:%S %Z",
true /* special */,
true /* local */) +
- " (" + butl::to_string (now - b.timestamp, false) + " ago)");
+ " (" + butl::to_string (now - b.timestamp, false) + " ago");
+
+ if (tn->archived)
+ ts += ", archived";
+
+ ts += ')';
+ // @@ Note that here we also load result logs which we don't need.
+ // Probably we should invent some table view to only load operation
+ // names and statuses.
+ //
if (b.state == build_state::built)
build_db_->load (b, b.results_section);
@@ -543,19 +828,29 @@ handle (request& rq, response& rs)
<< TR_VALUE ("toolchain",
b.toolchain_name + '-' +
b.toolchain_version.string ())
- << TR_VALUE ("config",
- b.configuration + " / " + b.target.string ())
- << TR_VALUE ("timestamp", ts)
- << TR_BUILD_RESULT (b, host, root)
+ << TR_VALUE ("target", b.target.string ())
+ << TR_VALUE ("tgt config", b.target_config_name);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", b.package_config_name);
+
+ s << TR_VALUE ("timestamp", ts);
+
+ if (b.interactive) // Note: can only be present for the building state.
+ s << TR_VALUE ("login", *b.interactive);
+
+ s << TR_BUILD_RESULT (b, tn->archived, host, root)
<< ~TBODY
<< ~TABLE;
// While at it, erase the built configuration from the unbuilt
// configurations set.
//
- unbuilt_configs.erase ({b.id.configuration,
- b.toolchain_name,
- b.toolchain_version});
+ unbuilt_configs.erase (config_toolchain {b.target,
+ b.target_config_name,
+ b.package_config_name,
+ b.toolchain_name,
+ b.toolchain_version});
}
// Print the package unbuilt configurations with the following sort
@@ -563,42 +858,57 @@ handle (request& rq, response& rs)
//
// 1: toolchain name
// 2: toolchain version (descending)
- // 3: configuration name
+ // 3: target
+ // 4: target configuration name
+ // 5: package configuration name
//
for (const auto& ct: unbuilt_configs)
{
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
s << TABLE(CLASS="proplist build")
<< TBODY
<< TR_VALUE ("toolchain",
ct.toolchain_name + '-' +
ct.toolchain_version.string ())
- << TR_VALUE ("config",
- ct.configuration + " / " +
- i->second->target.string ())
- << TR_VALUE ("result", "unbuilt")
+ << TR_VALUE ("target", ct.target.string ())
+ << TR_VALUE ("tgt config", ct.target_config);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", ct.package_config);
+
+ s << TR_VALUE ("result", "unbuilt")
<< ~TBODY
<< ~TABLE;
}
- // Print the package build exclusions that belong to the 'default' class.
+ // Print the package build exclusions that belong to the 'default' class,
+ // unless the package is built interactively (normally for a single
+ // configuration).
//
- for (const auto& c: *build_conf_)
+ if (!tn->interactive)
{
- string reason;
- if (belongs (c, "default") && exclude (c, &reason))
+ for (const package_build_config& pc: pkg->build_configs)
{
- s << TABLE(CLASS="proplist build")
- << TBODY
- << TR_VALUE ("config", c.name + " / " + c.target.string ())
- << TR_VALUE ("result",
- !reason.empty ()
- ? "excluded (" + reason + ')'
- : "excluded")
- << ~TBODY
- << ~TABLE;
+ for (const auto& tc: *target_conf_)
+ {
+ string reason;
+ if (belongs (tc, "default") && exclude (pc, tc, &reason))
+ {
+ s << TABLE(CLASS="proplist build")
+ << TBODY
+ << TR_VALUE ("target", tc.target.string ())
+ << TR_VALUE ("tgt config", tc.name);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", pc.name);
+
+ s << TR_VALUE ("result",
+ !reason.empty ()
+ ? "excluded (" + reason + ')'
+ : "excluded")
+ << ~TBODY
+ << ~TABLE;
+ }
+ }
}
}
@@ -607,19 +917,25 @@ handle (request& rq, response& rs)
s << ~DIV;
}
- const string& ch (pkg->changes);
-
- if (!ch.empty ())
+ if (const optional<typed_text>& c = pkg->changes)
{
const string id ("changes");
+ const string what (title + " changes");
s << H3 << "Changes" << ~H3
<< (full
- ? PRE_TEXT (ch, id)
- : PRE_TEXT (ch,
+ ? DIV_TEXT (*c,
+ false /* strip_title */,
+ id,
+ what,
+ error)
+ : DIV_TEXT (*c,
+ false /* strip_title */,
options_->package_changes (),
- url (!full, "changes"),
- id));
+ url (!full, id),
+ id,
+ what,
+ error));
}
s << ~DIV
diff --git a/mod/mod-package-version-details.hxx b/mod/mod-package-version-details.hxx
index d9a5f72..a88d6c2 100644
--- a/mod/mod-package-version-details.hxx
+++ b/mod/mod-package-version-details.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-package-version-details.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_PACKAGE_VERSION_DETAILS_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
diff --git a/mod/mod-packages.cxx b/mod/mod-packages.cxx
index 5d1945a..6026024 100644
--- a/mod/mod-packages.cxx
+++ b/mod/mod-packages.cxx
@@ -1,5 +1,4 @@
// file : mod/mod-packages.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-packages.hxx>
@@ -11,15 +10,16 @@
#include <odb/transaction.hxx>
#include <odb/schema-catalog.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace odb::core;
using namespace brep::cli;
@@ -49,8 +49,8 @@ init (scanner& s)
options_->root (dir_path ("/"));
// Check that the database 'package' schema matches the current one. It's
- // enough to perform the check in just a single module implementation (and we
- // don't do in the dispatcher because it doesn't use the database).
+ // enough to perform the check in just a single module implementation (and
+ // we don't do in the dispatcher because it doesn't use the database).
//
// Note that the failure can be reported by each web server worker process.
// While it could be tempting to move the check to the
@@ -136,8 +136,18 @@ handle (request& rq, response& rs)
<< DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
<< DIV(ID="content");
+ // On the first page print the search page description, if specified.
+ //
+ if (page == 0)
+ {
+ const web::xhtml::fragment& desc (options_->search_description ());
+
+ if (!desc.empty ())
+ s << DIV(ID="search-description") << desc << ~DIV;
+ }
+
// If the tenant is empty then we are in the global view and will display
- // packages from all the tenants.
+ // packages from all the public tenants.
//
optional<string> tn;
if (!tenant.empty ())
@@ -146,7 +156,7 @@ handle (request& rq, response& rs)
session sn;
transaction t (package_db_->begin ());
- auto pkg_count (
+ size_t pkg_count (
package_db_->query_value<latest_package_count> (
search_param<latest_package_count> (squery, tn)));
@@ -167,11 +177,10 @@ handle (request& rq, response& rs)
s << TABLE(CLASS="proplist package")
<< TBODY
- << TR_NAME (p->name, equery, root, p->tenant)
+ << TR_NAME (p->name, root, p->tenant)
<< TR_SUMMARY (p->summary)
<< TR_LICENSE (p->license_alternatives)
- << TR_DEPENDS (p->dependencies, root, p->tenant)
- << TR_REQUIRES (p->requirements);
+ << TR_DEPENDS (p->dependencies, root, p->tenant);
// In the global view mode add the tenant packages link. Note that the
// global view (and the link) makes sense only in the multi-tenant mode.
diff --git a/mod/mod-packages.hxx b/mod/mod-packages.hxx
index a67533c..611d63c 100644
--- a/mod/mod-packages.hxx
+++ b/mod/mod-packages.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-packages.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_PACKAGES_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
namespace brep
diff --git a/mod/mod-repository-details.cxx b/mod/mod-repository-details.cxx
index 398d8a6..082903b 100644
--- a/mod/mod-repository-details.cxx
+++ b/mod/mod-repository-details.cxx
@@ -1,27 +1,25 @@
// file : mod/mod-repository-details.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-repository-details.hxx>
-#include <algorithm> // max()
-
#include <libstudxml/serializer.hxx>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_string()
+#include <libbutl/timestamp.hxx> // to_string()
+
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/xhtml/serialization.hxx>
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
using namespace odb::core;
@@ -100,7 +98,7 @@ handle (request& rq, response& rs)
//
string id (html_id (r.canonical_name));
s << H1(ID=id)
- << A(HREF="#" + web::mime_url_encode (id, false))
+ << A(HREF='#' + web::mime_url_encode (id, false))
<< r.display_name
<< ~A
<< ~H1;
diff --git a/mod/mod-repository-details.hxx b/mod/mod-repository-details.hxx
index dd6efc1..e83831d 100644
--- a/mod/mod-repository-details.hxx
+++ b/mod/mod-repository-details.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-repository-details.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_REPOSITORY_DETAILS_HXX
@@ -8,7 +7,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/database-module.hxx>
namespace brep
diff --git a/mod/mod-repository-root.cxx b/mod/mod-repository-root.cxx
index ed170c9..34b4007 100644
--- a/mod/mod-repository-root.cxx
+++ b/mod/mod-repository-root.cxx
@@ -1,5 +1,4 @@
// file : mod/mod-repository-root.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-repository-root.hxx>
@@ -9,15 +8,15 @@
#include <cmark-gfm-core-extensions.h>
#include <sstream>
-#include <algorithm> // find()
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/mod-ci.hxx>
#include <mod/mod-submit.hxx>
+#include <mod/mod-upload.hxx>
#include <mod/mod-builds.hxx>
#include <mod/mod-packages.hxx>
#include <mod/mod-build-log.hxx>
@@ -109,24 +108,42 @@ namespace brep
//
repository_root::
repository_root ()
- : packages_ (make_shared<packages> ()),
+ :
+ //
+      // Only create and populate the tenant service map in the exemplar,
+      // passing a reference to it to all the sub-handler exemplars. Note
+      // that we dispatch the tenant service callbacks to the exemplar
+      // without creating a new instance for each callback (thus the
+      // callbacks are const).
+ //
+ tenant_service_map_ (make_shared<tenant_service_map> ()),
+ packages_ (make_shared<packages> ()),
package_details_ (make_shared<package_details> ()),
package_version_details_ (make_shared<package_version_details> ()),
repository_details_ (make_shared<repository_details> ()),
- build_task_ (make_shared<build_task> ()),
- build_result_ (make_shared<build_result> ()),
- build_force_ (make_shared<build_force> ()),
+ build_task_ (make_shared<build_task> (*tenant_service_map_)),
+ build_result_ (make_shared<build_result> (*tenant_service_map_)),
+ build_force_ (make_shared<build_force> (*tenant_service_map_)),
build_log_ (make_shared<build_log> ()),
builds_ (make_shared<builds> ()),
build_configs_ (make_shared<build_configs> ()),
submit_ (make_shared<submit> ()),
- ci_ (make_shared<ci> ())
+#ifdef BREP_CI_TENANT_SERVICE
+ ci_ (make_shared<ci> (*tenant_service_map_)),
+#else
+ ci_ (make_shared<ci> ()),
+#endif
+ upload_ (make_shared<upload> ())
{
}
repository_root::
repository_root (const repository_root& r)
: handler (r),
+ tenant_service_map_ (
+ r.initialized_
+ ? r.tenant_service_map_
+ : make_shared<tenant_service_map> ()),
//
// Deep/shallow-copy sub-handlers depending on whether this is an
// exemplar/handler.
@@ -151,15 +168,15 @@ namespace brep
build_task_ (
r.initialized_
? r.build_task_
- : make_shared<build_task> (*r.build_task_)),
+ : make_shared<build_task> (*r.build_task_, *tenant_service_map_)),
build_result_ (
r.initialized_
? r.build_result_
- : make_shared<build_result> (*r.build_result_)),
+ : make_shared<build_result> (*r.build_result_, *tenant_service_map_)),
build_force_ (
r.initialized_
? r.build_force_
- : make_shared<build_force> (*r.build_force_)),
+ : make_shared<build_force> (*r.build_force_, *tenant_service_map_)),
build_log_ (
r.initialized_
? r.build_log_
@@ -179,7 +196,15 @@ namespace brep
ci_ (
r.initialized_
? r.ci_
+#ifdef BREP_CI_TENANT_SERVICE
+ : make_shared<ci> (*r.ci_, *tenant_service_map_)),
+#else
: make_shared<ci> (*r.ci_)),
+#endif
+ upload_ (
+ r.initialized_
+ ? r.upload_
+ : make_shared<upload> (*r.upload_)),
options_ (
r.initialized_
? r.options_
@@ -206,6 +231,7 @@ namespace brep
append (r, build_configs_->options ());
append (r, submit_->options ());
append (r, ci_->options ());
+ append (r, upload_->options ());
return r;
}
@@ -251,6 +277,7 @@ namespace brep
sub_init (*build_configs_, "build_configs");
sub_init (*submit_, "submit");
sub_init (*ci_, "ci");
+ sub_init (*upload_, "upload");
// Parse own configuration options.
//
@@ -446,6 +473,13 @@ namespace brep
return handle ("ci", param);
}
+ else if (func == "upload")
+ {
+ if (handler_ == nullptr)
+ handler_.reset (new upload (*upload_));
+
+ return handle ("upload", param);
+ }
else
return nullopt;
};
diff --git a/mod/mod-repository-root.hxx b/mod/mod-repository-root.hxx
index 20ec0b6..aa60fda 100644
--- a/mod/mod-repository-root.hxx
+++ b/mod/mod-repository-root.hxx
@@ -1,5 +1,4 @@
// file : mod/mod-repository-root.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_REPOSITORY_ROOT_HXX
@@ -9,7 +8,8 @@
#include <libbrep/utility.hxx>
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
namespace brep
{
@@ -25,6 +25,7 @@ namespace brep
class build_configs;
class submit;
class ci;
+ class upload;
class repository_root: public handler
{
@@ -59,6 +60,8 @@ namespace brep
version ();
private:
+ shared_ptr<tenant_service_map> tenant_service_map_;
+
shared_ptr<packages> packages_;
shared_ptr<package_details> package_details_;
shared_ptr<package_version_details> package_version_details_;
@@ -71,6 +74,7 @@ namespace brep
shared_ptr<build_configs> build_configs_;
shared_ptr<submit> submit_;
shared_ptr<ci> ci_;
+ shared_ptr<upload> upload_;
shared_ptr<options::repository_root> options_;
diff --git a/mod/mod-submit.cxx b/mod/mod-submit.cxx
index 3130823..5ee358a 100644
--- a/mod/mod-submit.cxx
+++ b/mod/mod-submit.cxx
@@ -1,25 +1,25 @@
// file : mod/mod-submit.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/mod-submit.hxx>
#include <ostream>
-#include <libbutl/sha256.mxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
-#include <libbutl/manifest-types.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>
-#include <web/xhtml.hxx>
-#include <web/module.hxx>
+#include <web/server/module.hxx>
+
+#include <web/xhtml/serialization.hxx>
#include <mod/page.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
#include <mod/external-handler.hxx>
using namespace std;
@@ -163,7 +163,7 @@ handle (request& rq, response& rs)
if (!options_->submit_data_specified ())
return respond_manifest (404, "submission disabled");
- // Parse the request form data and verifying the submission size limit.
+ // Parse the request form data and verify the submission size limit.
//
// Note that if it is exceeded, then there are parameters and this is the
// submission rather than the form request, and so we respond with the
@@ -254,24 +254,17 @@ handle (request& rq, response& rs)
return respond_manifest (400, "invalid package archive checksum");
// Verify that unknown parameter values satisfy the requirements (contain
- // only ASCII printable characters plus '\r', '\n', and '\t').
+ // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n').
//
// Actually, the expected ones must satisfy too, so check them as well.
//
- auto printable = [] (const string& s) -> bool
- {
- for (char c: s)
- {
- if (!((c >= 0x20 && c <= 0x7E) || c == '\n' || c == '\r' || c == '\t'))
- return false;
- }
- return true;
- };
-
+ string what;
for (const name_value& nv: rps)
{
- if (nv.value && !printable (*nv.value))
- return respond_manifest (400, "invalid parameter " + nv.name);
+ if (nv.value &&
+ !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
+ return respond_manifest (400,
+ "invalid parameter " + nv.name + ": " + what);
}
// Note that from now on the result manifest we respond with will contain
@@ -299,8 +292,8 @@ handle (request& rq, response& rs)
// However, using the abbreviated checksum can be helpful for
// troubleshooting.
//
- td = dir_path (options_->submit_temp () /
- dir_path (path::traits_type::temp_name (ref)));
+ td = options_->submit_temp () /
+ dir_path (path::traits_type::temp_name (ref));
// It's highly unlikely but still possible that the temporary directory
// already exists. This can only happen due to the unclean web server
@@ -560,7 +553,7 @@ handle (request& rq, response& rs)
// Run the submission handler, if specified, reading the result manifest
// from its stdout and caching it as a name/value pair list for later use
- // (forwarding to the client, sending via email, etc.). Otherwise, create
+ // (forwarding to the client, sending via email, etc). Otherwise, create
// implied result manifest.
//
status_code sc;
@@ -690,7 +683,7 @@ handle (request& rq, response& rs)
sendmail sm (print_args,
2 /* stderr */,
options_->email (),
- "new package submission " + a.string () + " (" + ref + ")",
+ "new package submission " + a.string () + " (" + ref + ')',
{options_->submit_email ()});
// Write the submission request manifest.
diff --git a/mod/mod-submit.hxx b/mod/mod-submit.hxx
index 891f8a6..fc5f8d4 100644
--- a/mod/mod-submit.hxx
+++ b/mod/mod-submit.hxx
@@ -1,17 +1,16 @@
// file : mod/mod-submit.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MOD_SUBMIT_HXX
#define MOD_MOD_SUBMIT_HXX
-#include <web/xhtml-fragment.hxx>
+#include <web/xhtml/fragment.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <mod/module.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
namespace brep
{
diff --git a/mod/mod-upload.cxx b/mod/mod-upload.cxx
new file mode 100644
index 0000000..9f8b9de
--- /dev/null
+++ b/mod/mod-upload.cxx
@@ -0,0 +1,763 @@
+// file : mod/mod-upload.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/mod-upload.hxx>
+
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/uuid.hxx>
+#include <libbutl/base64.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <web/server/module.hxx>
+
+#include <libbrep/build.hxx>
+#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/external-handler.hxx>
+
+using namespace std;
+using namespace butl;
+using namespace brep::cli;
+using namespace odb::core;
+
+// While currently the user-defined copy constructor is not required (we don't
+// need to deep copy nullptr's), it is a good idea to keep the placeholder
+// ready for less trivial cases.
+//
+brep::upload::
+upload (const upload& r)
+ : build_result_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr)
+{
+}
+
+void brep::upload::
+init (scanner& s)
+{
+ HANDLER_DIAG;
+
+ options_ = make_shared<options::upload> (
+ s, unknown_mode::fail, unknown_mode::fail);
+
+ // Verify that the upload handling is setup properly, if configured.
+ //
+ for (const auto& ud: options_->upload_data ())
+ {
+ const string& t (ud.first);
+
+ if (t.empty ())
+ fail << "empty upload type in upload-data configuration option";
+
+ if (ud.second.relative ())
+ fail << t << " upload-data path '" << ud.second << "' is relative";
+
+ if (!dir_exists (ud.second))
+ fail << t << " upload-data directory '" << ud.second
+ << "' does not exist";
+
+ const map<string, path>& uh (options_->upload_handler ());
+ auto i (uh.find (t));
+
+ if (i != uh.end () && i->second.relative ())
+ fail << t << " upload-handler path '" << i->second << "' is relative";
+ }
+
+ if (options_->upload_data_specified ())
+ {
+ if (!options_->build_config_specified ())
+ fail << "upload functionality is enabled but package building "
+ << "functionality is disabled";
+
+ build_result_module::init (*options_, *options_);
+ }
+}
+
+bool brep::upload::
+handle (request& rq, response& rs)
+{
+ using brep::version; // Not to confuse with module::version.
+
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+
+ HANDLER_DIAG;
+
+ // We will respond with the manifest to the upload protocol violations and
+ // with a plain text message on the internal errors. In the latter case we
+  // will always respond with the same neutral message for security reasons,
+ // logging the error details. Note that descriptions of exceptions caught by
+ // the web server are returned to the client (see web/module.hxx for
+ // details), and we want to avoid this when there is a danger of exposing
+ // sensitive data.
+ //
+ // Also we will pass through exceptions thrown by the underlying API, unless
+ // we need to handle them or add details for the description, in which case
+  // we will fall back to one of the above-mentioned response methods.
+ //
+ // Note that both respond_manifest() and respond_error() are normally called
+ // right before the end of the request handling. They both always return
+ // true to allow bailing out with a single line, for example:
+ //
+ // return respond_error (); // Request is handled with an error.
+ //
+ string request_id; // Will be set later.
+ auto respond_manifest = [&rs, &request_id] (status_code status,
+ const string& message) -> bool
+ {
+ serializer s (rs.content (status, "text/manifest;charset=utf-8"),
+ "response");
+
+ s.next ("", "1"); // Start of manifest.
+ s.next ("status", to_string (status));
+ s.next ("message", message);
+
+ if (!request_id.empty ())
+ s.next ("reference", request_id);
+
+ s.next ("", ""); // End of manifest.
+ return true;
+ };
+
+ auto respond_error = [&rs] (status_code status = 500) -> bool
+ {
+ rs.content (status, "text/plain;charset=utf-8")
+ << "upload handling failed" << endl;
+
+ return true;
+ };
+
+ // Check if the upload functionality is enabled.
+ //
+ // Note that this is not an upload protocol violation but it feels right to
+ // respond with the manifest, to help the client a bit.
+ //
+ if (!options_->upload_data_specified ())
+ return respond_manifest (404, "upload disabled");
+
+ // Parse the request data and verify the upload size limit.
+ //
+ // Note that the size limit is upload type-specific. Thus, first, we need to
+ // determine the upload type which we expect to be specified in the URL as a
+ // value of the upload parameter.
+ //
+ string type;
+ dir_path dir;
+
+ try
+ {
+ name_value_scanner s (rq.parameters (0 /* limit */, true /* url_only */));
+
+ // We only expect the upload=<type> parameter in URL.
+ //
+ params::upload params (
+ params::upload (s, unknown_mode::fail, unknown_mode::fail));
+
+ type = move (params.type ());
+
+ if (type.empty ())
+ return respond_manifest (400, "upload type expected");
+
+ // Check if this upload type is enabled. While at it, cache the upload
+ // data directory path.
+ //
+ const map<string, dir_path>& ud (options_->upload_data ());
+ auto i (ud.find (type));
+
+ if (i == ud.end ())
+ return respond_manifest (404, type + " upload disabled");
+
+ dir = i->second;
+ }
+ catch (const cli::exception&)
+ {
+ return respond_manifest (400, "invalid parameter");
+ }
+
+ try
+ {
+ const map<string, size_t>& us (options_->upload_max_size ());
+ auto i (us.find (type));
+ rq.parameters (i != us.end () ? i->second : 10485760); // 10M by default.
+ }
+ catch (const invalid_request& e)
+ {
+ if (e.status == 413) // Payload too large?
+ return respond_manifest (e.status, type + " upload size exceeds limit");
+
+ throw;
+ }
+
+ // The request parameters are now parsed and the limit doesn't really matter.
+ //
+ const name_values& rps (rq.parameters (0 /* limit */));
+
+ // Verify the upload parameters we expect. The unknown ones will be
+ // serialized to the upload manifest.
+ //
+ params::upload params;
+
+ try
+ {
+ name_value_scanner s (rps);
+ params = params::upload (s, unknown_mode::skip, unknown_mode::skip);
+ }
+ catch (const cli::exception&)
+ {
+ return respond_manifest (400, "invalid parameter");
+ }
+
+ const string& session (params.session ());
+ const string& instance (params.instance ());
+ const string& archive (params.archive ());
+ const string& sha256sum (params.sha256sum ());
+
+ if (session.empty ())
+ return respond_manifest (400, "upload session expected");
+
+ optional<vector<char>> challenge;
+
+ if (params.challenge_specified ())
+ try
+ {
+ challenge = base64_decode (params.challenge ());
+ }
+ catch (const invalid_argument&)
+ {
+ return respond_manifest (400, "invalid challenge");
+ }
+
+ if (instance.empty ())
+ return respond_manifest (400, "upload instance expected");
+
+ if (archive.empty ())
+ return respond_manifest (400, "upload archive expected");
+
+ if (sha256sum.empty ())
+ return respond_manifest (400, "upload archive checksum expected");
+
+ if (sha256sum.size () != 64)
+ return respond_manifest (400, "invalid upload archive checksum");
+
+ // Verify that unknown parameter values satisfy the requirements (contain
+ // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n').
+ //
+ // Actually, the expected ones must satisfy too, so check them as well.
+ //
+ string what;
+ for (const name_value& nv: rps)
+ {
+ if (nv.value &&
+ !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
+ return respond_manifest (400,
+ "invalid parameter " + nv.name + ": " + what);
+ }
+
+ parse_session_result sess;
+
+ try
+ {
+ sess = parse_session (session);
+ }
+ catch (const invalid_argument& e)
+ {
+ return respond_manifest (400, string ("invalid session: ") + e.what ());
+ }
+
+ // If the session expired (no such configuration, etc) then, similar to the
+ // build result module, we log this case with the warning severity and
+ // respond with manifest with the 200 status as if the session is valid (see
+ // the build result module for the reasoning).
+ //
+ auto warn_expired = [&session, &warn] (const string& d)
+ {
+ warn << "session '" << session << "' expired: " << d;
+ };
+
+ const build_id& id (sess.id);
+
+ // Make sure the build configuration still exists.
+ //
+ const build_target_config* tc;
+ {
+ auto i (target_conf_map_->find (
+ build_target_config_id {id.target, id.target_config_name}));
+
+ if (i == target_conf_map_->end ())
+ {
+ warn_expired ("no build configuration");
+ return respond_manifest (200, type + " upload is queued");
+ }
+
+ tc = i->second;
+ }
+
+ // Note that if the session authentication fails (probably due to the
+ // authentication settings change), then we log this case with the warning
+ // severity and respond with manifest with the 200 status as if the
+ // challenge is valid (see the build result module for the reasoning).
+ //
+ shared_ptr<build> bld;
+ shared_ptr<build_package> pkg;
+ shared_ptr<build_repository> rep;
+ {
+ transaction t (build_db_->begin ());
+
+ package_build pb;
+ shared_ptr<build> b;
+ if (!build_db_->query_one<package_build> (
+ query<package_build>::build::id == id, pb))
+ {
+ warn_expired ("no package build");
+ }
+ else if ((b = move (pb.build))->state != build_state::building)
+ {
+ warn_expired ("package configuration state is " + to_string (b->state));
+ }
+ else if (b->timestamp != sess.timestamp)
+ {
+ warn_expired ("non-matching timestamp");
+ }
+ else if (authenticate_session (*options_, challenge, *b, session))
+ {
+ bld = move (b);
+ pkg = build_db_->load<build_package> (id.package);
+ rep = pkg->internal_repository.load ();
+ }
+
+ t.commit ();
+ }
+
+ // Note that from now on the result manifest we respond with will contain
+ // the reference value.
+ //
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return respond_error ();
+ }
+
+ if (bld == nullptr)
+ return respond_manifest (200, type + " upload is queued");
+
+ // Create the upload data directory.
+ //
+ dir_path dd (dir / dir_path (request_id));
+
+ try
+ {
+ // It's highly unlikely but still possible that the directory already
+ // exists. This can only happen if the generated uuid is not unique.
+ //
+ if (try_mkdir (dd) == mkdir_status::already_exists)
+ throw_generic_error (EEXIST);
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to create directory '" << dd << "': " << e;
+ return respond_error ();
+ }
+
+ auto_rmdir ddr (dd);
+
+ // Save the package archive into the temporary directory and verify its
+ // checksum.
+ //
+ // Note that the archive file name can potentially contain directory path in
+ // the POSIX form, so let's strip it if that's the case.
+ //
+ path a;
+ path af;
+
+ try
+ {
+ size_t n (archive.find_last_of ('/'));
+ a = path (n != string::npos ? string (archive, n + 1) : archive);
+ af = dd / a;
+ }
+ catch (const invalid_path&)
+ {
+ return respond_manifest (400, "invalid package archive name");
+ }
+
+ try
+ {
+ istream& is (rq.open_upload ("archive"));
+
+ // Note that istream::read() sets failbit if unable to read the requested
+ // number of bytes.
+ //
+ is.exceptions (istream::badbit);
+
+ sha256 sha;
+ char buf[8192];
+ ofdstream os (af, fdopen_mode::binary);
+
+ while (!eof (is))
+ {
+ is.read (buf, sizeof (buf));
+
+ if (size_t n = is.gcount ())
+ {
+ sha.append (buf, n);
+ os.write (buf, n);
+ }
+ }
+
+ os.close ();
+
+ // Respond with the unprocessable entity (422) code for the archive
+ // checksum mismatch.
+ //
+ if (sha.string () != sha256sum)
+ return respond_manifest (422, "upload archive checksum mismatch");
+ }
+ // Note that invalid_argument (thrown by open_upload() function call) can
+ // mean both no archive upload or multiple archive uploads.
+ //
+ catch (const invalid_argument&)
+ {
+ return respond_manifest (400, "archive upload expected");
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write package archive '" << af << "': " << e;
+ return respond_error ();
+ }
+
+ // Serialize the upload request manifest to a stream. On the serialization
+ // error respond to the client with the manifest containing the bad request
+ // (400) code and return false, on the stream error pass through the
+ // io_error exception, otherwise return true.
+ //
+ timestamp ts (system_clock::now ());
+
+ auto rqm = [&request_id,
+ &ts,
+ &rps,
+ &session,
+ &instance,
+ &a,
+ &sha256sum,
+ &id,
+ &bld,
+ &pkg,
+ &rep,
+ &tc,
+ &sess,
+ &respond_manifest,
+ this] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serializer s (os, "request", long_lines);
+
+ // Serialize the upload manifest header.
+ //
+ s.next ("", "1"); // Start of manifest.
+ s.next ("id", request_id);
+ s.next ("session", session);
+ s.next ("instance", instance);
+ s.next ("archive", a.string ());
+ s.next ("sha256sum", sha256sum);
+
+ s.next ("timestamp",
+ butl::to_string (ts,
+ "%Y-%m-%dT%H:%M:%SZ",
+ false /* special */,
+ false /* local */));
+
+ s.next ("name", id.package.name.string ());
+ s.next ("version", pkg->version.string ());
+ s.next ("project", pkg->project.string ());
+ s.next ("target-config", tc->name);
+ s.next ("package-config", id.package_config_name);
+ s.next ("target", tc->target.string ());
+
+ if (!tenant.empty ())
+ s.next ("tenant", tenant);
+
+ s.next ("toolchain-name", id.toolchain_name);
+ s.next ("toolchain-version", sess.toolchain_version.string ());
+ s.next ("repository-name", rep->canonical_name);
+
+ s.next ("machine-name", bld->machine.name);
+ s.next ("machine-summary", bld->machine.summary);
+
+ // Serialize the request parameters.
+ //
+      // Note that the serializer constrains the parameter names (can't start
+ // with '#', can't contain ':' and the whitespaces, etc.).
+ //
+ for (const name_value& nv: rps)
+ {
+ // Note that the upload parameter is renamed to '_' by the root
+ // handler (see the request_proxy class for details).
+ //
+ const string& n (nv.name);
+ if (n != "_" &&
+ n != "session" &&
+ n != "challenge" &&
+ n != "instance" &&
+ n != "archive" &&
+ n != "sha256sum")
+ s.next (n, nv.value ? *nv.value : "");
+ }
+
+ s.next ("", ""); // End of manifest.
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ respond_manifest (400, string ("invalid parameter: ") + e.what ());
+ return false;
+ }
+ };
+
+ // Serialize the upload request manifest to the upload directory.
+ //
+ path rqf (dd / "request.manifest");
+
+ try
+ {
+ ofdstream os (rqf);
+ bool r (rqm (os));
+ os.close ();
+
+ if (!r)
+      return true; // The client has already been responded to with the manifest.
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << rqf << "': " << e;
+ return respond_error ();
+ }
+
+ // Given that the upload data is now successfully persisted we are no longer
+ // in charge of removing it, except for the cases when the upload
+ // handler terminates with an error (see below for details).
+ //
+ ddr.cancel ();
+
+ // If the handler terminates with non-zero exit status or specifies 5XX
+ // (HTTP server error) upload result manifest status value, then we stash
+ // the upload data directory for troubleshooting. Otherwise, if it's the 4XX
+ // (HTTP client error) status value, then we remove the directory.
+ //
+ auto stash_upload_dir = [&dd, error] ()
+ {
+ if (dir_exists (dd))
+ try
+ {
+ mvdir (dd, dir_path (dd + ".fail"));
+ }
+ catch (const system_error& e)
+ {
+ // Not much we can do here. Let's just log the issue and bail out
+ // leaving the directory in place.
+ //
+ error << "unable to rename directory '" << dd << "': " << e;
+ }
+ };
+
+ // Run the upload handler, if specified, reading the result manifest from
+ // its stdout and caching it as a name/value pair list for later use
+ // (forwarding to the client, sending via email, etc). Otherwise, create
+ // implied result manifest.
+ //
+ status_code sc;
+ vector<manifest_name_value> rvs;
+
+ const map<string, path>& uh (options_->upload_handler ());
+ auto hi (uh.find (type));
+
+ if (hi != uh.end ())
+ {
+ auto range (options_->upload_handler_argument ().equal_range (type));
+
+ strings args;
+ for (auto i (range.first); i != range.second; ++i)
+ args.push_back (i->second);
+
+ const map<string, size_t>& ht (options_->upload_handler_timeout ());
+ auto i (ht.find (type));
+
+ optional<external_handler::result_manifest> r (
+ external_handler::run (hi->second,
+ args,
+ dd,
+ i != ht.end () ? i->second : 0,
+ error,
+ warn,
+ verb_ ? &trace : nullptr));
+
+ if (!r)
+ {
+ stash_upload_dir ();
+ return respond_error (); // The diagnostics is already issued.
+ }
+
+ sc = r->status;
+ rvs = move (r->values);
+ }
+ else // Create the implied result manifest.
+ {
+ sc = 200;
+
+ auto add = [&rvs] (string n, string v)
+ {
+ manifest_name_value nv {
+ move (n), move (v),
+ 0 /* name_line */, 0 /* name_column */,
+ 0 /* value_line */, 0 /* value_column */,
+ 0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */};
+
+ rvs.emplace_back (move (nv));
+ };
+
+ add ("status", "200");
+ add ("message", type + " upload is queued");
+ add ("reference", request_id);
+ }
+
+ assert (!rvs.empty ()); // Produced by the handler or is implied.
+
+ // Serialize the upload result manifest to a stream. On the serialization
+ // error log the error description and return false, on the stream error
+ // pass through the io_error exception, otherwise return true.
+ //
+ auto rsm = [&rvs,
+ &error,
+ &request_id,
+ &type] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serializer s (os, "result", long_lines);
+ serialize_manifest (s, rvs);
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ error << "ref " << request_id << ": unable to serialize " << type
+ << " upload handler's output: " << e;
+ return false;
+ }
+ };
+
+ // If the upload data directory still exists then perform an appropriate
+ // action on it, depending on the upload result status. Note that the
+ // handler could move or remove the directory.
+ //
+ if (dir_exists (dd))
+ {
+ // Remove the directory if the client error is detected.
+ //
+ if (sc >= 400 && sc < 500)
+ {
+ rmdir_r (dd);
+ }
+ //
+    // Otherwise, save the result manifest into the directory. Also stash the
+ // directory for troubleshooting in case of the server error.
+ //
+ else
+ {
+ path rsf (dd / "result.manifest");
+
+ try
+ {
+ ofdstream os (rsf);
+
+ // Not being able to stash the result manifest is not a reason to
+ // claim the upload failed. The error is logged nevertheless.
+ //
+ rsm (os);
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ // Not fatal (see above).
+ //
+ error << "unable to write to '" << rsf << "': " << e;
+ }
+
+ if (sc >= 500 && sc < 600)
+ stash_upload_dir ();
+ }
+ }
+
+ // Send email, if configured. Use the long lines manifest serialization mode
+ // for the convenience of copying/clicking URLs they contain.
+ //
+ // Note that we don't consider the email sending failure to be an upload
+ // failure as the upload data is successfully persisted and the handler is
+ // successfully executed, if configured. One can argue that email can be
+ // essential for the upload processing and missing it would result in the
+ // incomplete upload. In this case it's natural to assume that the web
+ // server error log is monitored and the email sending failure will be
+ // noticed.
+ //
+ const map<string, string>& ue (options_->upload_email ());
+ auto ei (ue.find (type));
+
+ if (ei != ue.end ())
+ try
+ {
+ // Redirect the diagnostics to the web server error log.
+ //
+ sendmail sm ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ 2 /* stderr */,
+ options_->email (),
+ type + " upload (" + request_id + ')',
+ {ei->second});
+
+ // Write the upload request manifest.
+ //
+ bool r (rqm (sm.out, true /* long_lines */));
+ assert (r); // The serialization succeeded once, so can't fail now.
+
+ // Write the upload result manifest.
+ //
+ sm.out << "\n\n";
+
+ // We don't care about the result (see above).
+ //
+ rsm (sm.out, true /* long_lines */);
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+
+ if (!rsm (rs.content (sc, "text/manifest;charset=utf-8")))
+ return respond_error (); // The error description is already logged.
+
+ return true;
+}
diff --git a/mod/mod-upload.hxx b/mod/mod-upload.hxx
new file mode 100644
index 0000000..6cc723b
--- /dev/null
+++ b/mod/mod-upload.hxx
@@ -0,0 +1,41 @@
+// file : mod/mod-upload.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_MOD_UPLOAD_HXX
+#define MOD_MOD_UPLOAD_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/build-result-module.hxx>
+
+namespace brep
+{
+ class upload: public build_result_module
+ {
+ public:
+ upload () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ upload (const upload&);
+
+ virtual bool
+ handle (request&, response&);
+
+ virtual const cli::options&
+ cli_options () const {return options::upload::description ();}
+
+ private:
+ virtual void
+ init (cli::scanner&);
+
+ private:
+ shared_ptr<options::upload> options_;
+ };
+}
+
+#endif // MOD_MOD_UPLOAD_HXX
diff --git a/mod/options.cli b/mod/module.cli
index 4004e79..a107ffe 100644
--- a/mod/options.cli
+++ b/mod/module.cli
@@ -1,10 +1,13 @@
// file : mod/options.cli -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
+include <map>;
+include <regex>;
+
include <libbpkg/manifest.hxx>; // repository_location
+include <libbbot/manifest.hxx>; // interactive_mode
-include <web/xhtml-fragment.hxx>;
+include <web/xhtml/fragment.hxx>;
include <libbrep/types.hxx>;
@@ -18,7 +21,7 @@ namespace brep
{
// Option groups.
//
- class handler
+ class repository_email
{
string email
{
@@ -26,7 +29,10 @@ namespace brep
"Repository email. This email is used for the \cb{From:} header in
emails send by \cb{brep} (for example, build failure notifications)."
}
+ };
+ class repository_url
+ {
string host
{
"<host>",
@@ -38,14 +44,36 @@ namespace brep
dir_path root = "/"
{
- "<path>"
+ "<path>",
"Repository root. That is, this is the part of the URL between the
host name and the start of the repository. For example, root value
'\cb{/pkg}' means the repository URL is \cb{http://example.org/pkg/}.
Specify '\cb{/}' to use the web server root
(\cb{http://example.org/})."
}
+ };
+
+ class build_email_notification: repository_email, repository_url
+ {
+ std::map<string, build_email> build-toolchain-email
+ {
+ "<name>=<mode>",
+ "Enable or disable package build notification emails. The valid <mode>
+ values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is
+ specified for a toolchain name, then emails are sent according to the
+ \cb{build-*email} package manifest values when all versions of a
+ package are built with this toolchain. If \cb{latest} is specified,
+ then for this toolchain name the emails are only sent for the latest
+ version of a package. If \cb{none} is specified, then no emails are
+ sent for this toolchain name. By default the \cb{latest} mode is
+ assumed. Repeat this option to enable/disable emails for multiple
+ toolchains. See \l{bpkg#manifest-package Package Manifest} for
+ details on \cb{build-*email} values."
+ }
+ };
+ class handler
+ {
string tenant-name = "tenant"
{
"<name>",
@@ -103,14 +131,14 @@ namespace brep
{
"<user>",
"Package database login user name. If not specified, then operating
- system (login) name is used. See also \c{package-db-role}."
+ system (login) name is used. See also \cb{package-db-role}."
}
string package-db-role = "brep"
{
"<user>",
"Package database execution user name. If not empty then the login
- user will be switched (with \c{SET ROLE}) to this user prior to
+ user will be switched (with \cb{SET ROLE}) to this user prior to
executing any statements. If not specified, then \cb{brep} is used."
}
@@ -194,11 +222,96 @@ namespace brep
be specified in seconds. Default is 10 minutes."
}
- size_t build-normal-rebuild-timeout = 86400
+ size_t build-soft-rebuild-timeout = 86400
+ {
+ "<seconds>",
+ "Time to wait before considering a package for a soft rebuild (only to
+ be performed if the build environment or any of the package
+ dependencies have changed). Must be specified in seconds. The special
+       zero value disables soft rebuilds. Default is 24 hours."
+ }
+
+ size_t build-alt-soft-rebuild-timeout
+ {
+ "<seconds>",
+ "Alternative package soft rebuild timeout to use instead of the soft
+ rebuild timeout (see \cb{build-soft-rebuild-timeout} for details)
+ during the time interval specified with the
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options. Must be specified in
+ seconds. Default is the time interval length plus
+ \c{(\b{build-soft-rebuild-timeout} - 24h)} if soft rebuild timeout
+ is greater than 24 hours (thus the rebuild is only triggered within
+ the last 24 hours of the \cb{build-soft-rebuild-timeout} expiration)."
+ }
+
+ duration build-alt-soft-rebuild-start
+ {
+ "<hours>:<minutes>",
+ "The start time of the alternative package soft rebuild timeout (see
+ \cb{build-alt-soft-rebuild-timeout} for details). Must be specified
+ as a time of day in the local timezone. The
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options must be either both
+ specified or absent. If unspecified, then no alternative rebuild
+ timeout will be used."
+ }
+
+ duration build-alt-soft-rebuild-stop
+ {
+ "<hours>:<minutes>",
+ "The end time of the alternative package soft rebuild timeout (see
+ \cb{build-alt-soft-rebuild-timeout} for details). Must be specified
+ as a time of day in the local timezone. If it is less than the
+ \cb{build-alt-soft-rebuild-start} option value, then the time
+ interval extends through midnight. The
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options must be either both
+ specified or absent. If unspecified, then no alternative rebuild
+ timeout will be used."
+ }
+
+ size_t build-hard-rebuild-timeout = 604800
+ {
+ "<seconds>",
+ "Time to wait before considering a package for a hard rebuild (to be
+ performed unconditionally). Must be specified in seconds. The special
+ zero value disables hard rebuilds. Default is 7 days."
+ }
+
+ size_t build-alt-hard-rebuild-timeout
+ {
+ "<seconds>",
+ "Alternative package hard rebuild timeout. The semantics is the
+ same as for the \cb{build-alt-soft-rebuild-timeout} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ duration build-alt-hard-rebuild-start
+ {
+ "<hours>:<minutes>",
+ "The start time of the alternative package hard rebuild timeout (see
+ \cb{build-alt-hard-rebuild-timeout} for details). The semantics is
+ the same as for the \cb{build-alt-soft-rebuild-start} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ duration build-alt-hard-rebuild-stop
+ {
+ "<hours>:<minutes>",
+ "The end time of the alternative package hard rebuild timeout (see
+ \cb{build-alt-hard-rebuild-timeout} for details). The semantics is
+ the same as for the \cb{build-alt-soft-rebuild-stop} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ size_t build-queued-timeout = 30
{
"<seconds>",
- "Time to wait before considering a package for a normal rebuild. Must
- be specified in seconds. Default is 24 hours."
+ "Time to wait before assuming the \cb{queued} notifications are
+ delivered for package CI requests submitted via third-party services
+ (GitHub, etc). During this time a package is not considered for a
+ build. Must be specified in seconds. Default is 30 seconds."
}
};
@@ -208,14 +321,14 @@ namespace brep
{
"<user>",
"Build database login user name. If not specified, then operating
- system (login) name is used. See also \c{build-db-role}."
+ system (login) name is used. See also \cb{build-db-role}."
}
string build-db-role = "brep"
{
"<user>",
"Build database execution user name. If not empty then the login
- user will be switched (with \c{SET ROLE}) to this user prior to
+ user will be switched (with \cb{SET ROLE}) to this user prior to
executing any statements. If not specified, then \cb{brep} is used."
}
@@ -265,6 +378,82 @@ namespace brep
}
};
+ class build_upload
+ {
+ std::map<string, dir_path> upload-data
+ {
+ "<type>=<dir>",
+ "The directory to save upload data to for the specified upload type.
+ If unspecified, the build artifacts upload functionality will be
+ disabled for this type. See \l{brep The \cb{build2} Repository
+ Interface Manual} for more information on build artifacts upload.
+
+ Note that the directory path must be absolute and the directory
+ itself must exist and have read, write, and execute permissions
+ granted to the user that runs the web server."
+ }
+
+ std::map<string, size_t> upload-max-size
+ {
+ "<type>=<bytes>",
+ "The maximum size of the upload data accepted for the specified upload
+ type. Note that currently the entire upload request is read into
+ memory. The default is 10M."
+ }
+
+ std::map<string, string> upload-email
+ {
+ "<type>=<email>",
+ "The build artifacts upload email. If specified, the upload request
+ and result manifests will be sent to this address. See \l{brep The
+ \cb{build2} Repository Interface Manual} for more information."
+ }
+
+ std::map<string, path> upload-handler
+ {
+ "<type>=<path>",
+ "The handler program to be executed on build artifacts upload of the
+ specified type. The handler is executed as part of the HTTP request
+ and is passed additional arguments that can be specified with
+ \cb{upload-handler-argument} followed by the absolute path to the
+ upload directory (\cb{upload-data}). See \l{brep The \cb{build2}
+ Repository Interface Manual} for more information. Note that the
+ program path must be absolute."
+ }
+
+ std::multimap<string, string> upload-handler-argument
+ {
+ "<type>=<arg>",
+ "Additional arguments to be passed to the upload handler program for
+ the specified upload type (see \cb{upload-handler} for details).
+ Repeat this option to specify multiple arguments."
+ }
+
+ std::map<string, size_t> upload-handler-timeout
+ {
+ "<type>=<seconds>",
+ "The upload handler program timeout in seconds for the specified
+ upload type. If specified and the handler does not exit in the
+ allotted time, then it is killed and its termination is treated as
+ abnormal."
+ }
+
+ std::multimap<string, string> upload-toolchain-exclude
+ {
+ "<type>=<name>",
+ "Disable upload of the specified type for the specified toolchain
+ name. Repeat this option to disable uploads for multiple toolchains."
+ }
+
+ std::multimap<string, string> upload-repository-exclude
+ {
+ "<type>=<name>",
+ "Disable upload of the specified type for packages from the repository
+ with the specified canonical name. Repeat this option to disable
+ uploads for multiple repositories."
+ }
+ };
+
class page
{
web::xhtml::fragment logo
@@ -274,7 +463,7 @@ namespace brep
edge. The value is treated as an XHTML5 fragment."
}
- vector<page_menu> menu;
+ vector<page_menu> menu
{
"<label=link>",
"Web page menu. Each entry is displayed in the page header in the
@@ -308,7 +497,7 @@ namespace brep
The default is 500 (~ 80 characters * 6 lines)."
}
- uint16_t package-changes = 5000;
+ uint16_t package-changes = 5000
{
"<len>",
"Number of package changes characters to display in brief pages. The
@@ -319,7 +508,7 @@ namespace brep
// Handler options.
//
- class packages: search, package_db, page, handler
+ class packages: search, package_db, page, repository_url, handler
{
string search-title = "Packages"
{
@@ -327,24 +516,63 @@ namespace brep
"Package search page title. It is placed inside XHTML5 <title>
element."
}
+
+ web::xhtml::fragment search-description
+ {
+ "<xhtml>",
+ "Package search page description. If specified, it is displayed
+ before the search form on the first page only. The value is
+ treated as an XHTML5 fragment."
+ }
};
- class package_details: package, search, package_db, page, handler
+ class package_details: package, package_db,
+ search,
+ page,
+ repository_url,
+ handler
{
};
class package_version_details: package, package_db,
build, build_db,
page,
+ repository_url,
handler
{
+ dir_path bindist-root
+ {
+ "<dir>",
+ "The root directory where the uploaded binary distribution packages
+ are saved to under the following directory hierarchy:
+
+ \
+ [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+ \
+
+ The package configuration directory symlinks that match these paths
+ are mapped to web URLs based on the \cb{bindist-url} value and
+ displayed on the package version details page. If this option is
+ specified, then \cb{bindist-url} must be specified as well."
+ }
+
+ string bindist-url
+ {
+ "<url>",
+ "The root URL of the directory specified with the \cb{bindist-root}
+ option. This option must be specified if \cb{bindist-root} is
+ specified."
+ }
};
- class repository_details: package_db, page, handler
+ class repository_details: package_db, page, repository_url, handler
{
};
- class build_task: build, build_db, handler
+ class build_task: build, build_db,
+ build_upload,
+ build_email_notification,
+ handler
{
size_t build-task-request-max-size = 102400
{
@@ -361,11 +589,35 @@ namespace brep
"Time to wait before considering the expected task result lost. Must be
specified in seconds. The default is 3 hours."
}
+
+ vector<pair<std::regex, string>> build-interactive-login
+ {
+ "</regex/replacement/>",
+ "Regular expressions for transforming the interactive build login
+ information, for example, into the actual command that can be used
+ by the user. The regular expressions are matched against the
+ \"<agent>\ <interactive-login>\" string containing the respective
+ task request manifest values. The first matching expression is used
+ for the transformation. If no expression matches, then the task
+ request is considered invalid, unless no expressions are specified.
+ Repeat this option to specify multiple expressions."
+ }
+
+ build_order build-package-order = build_order::stable
+ {
+ "<order>",
+ "Order in which packages are considered for build. The valid <order>
+ values are \cb{stable} and \cb{random}. If not specified, then
+ \cb{stable} is assumed. Note that interactive builds are always
+ preferred."
+ }
};
- class build_result: build, package_db, build_db, handler
+ class build_result: build, build_db,
+ build_email_notification,
+ handler
{
- size_t build-result-request-max-size = 10240000
+ size_t build-result-request-max-size = 10485760
{
"<bytes>",
"The maximum size of the build result manifest accepted. Note that the
@@ -375,7 +627,7 @@ namespace brep
}
};
- class build_log: build, build_db, handler
+ class build_log: build, build_db, repository_url, handler
{
};
@@ -383,7 +635,7 @@ namespace brep
{
};
- class builds: build, build_db, page, handler
+ class builds: build, build_db, page, repository_url, handler
{
uint16_t build-page-entries = 20
{
@@ -398,7 +650,7 @@ namespace brep
}
};
- class build_configs: build, page, handler
+ class build_configs: build, page, repository_url, handler
{
uint16_t build-config-page-entries = 20
{
@@ -413,7 +665,7 @@ namespace brep
}
};
- class submit: page, handler
+ class submit: page, repository_email, repository_url, handler
{
dir_path submit-data
{
@@ -493,7 +745,7 @@ namespace brep
}
};
- class ci: page, handler
+ class ci_start: repository_email
{
dir_path ci-data
{
@@ -508,15 +760,6 @@ namespace brep
granted to the user that runs the web server."
}
- path ci-form
- {
- "<file>",
- "The package CI form fragment. If specified, then its contents are
- treated as an XHTML5 fragment that is inserted into the <body>
- element of the CI page. If unspecified, then no CI page will be
- displayed. Note that the file path must be absolute."
- }
-
string ci-email
{
"<email>",
@@ -553,23 +796,53 @@ namespace brep
}
};
- class repository_root: handler
+ class ci_cancel
+ {
+ };
+
+ class ci: ci_start, page, repository_url, handler
+ {
+ // Classic CI-specific options.
+ //
+
+ path ci-form
+ {
+ "<file>",
+ "The package CI form fragment. If specified, then its contents are
+ treated as an XHTML5 fragment that is inserted into the <body>
+ element of the CI page. If unspecified, then no CI page will be
+ displayed. Note that the file path must be absolute."
+ }
+ };
+
+ class ci_github: ci_start, ci_cancel, build_db, handler
+ {
+ // GitHub CI-specific options (e.g., request timeout when invoking
+ // GitHub APIs).
+ //
+ };
+
+ class upload: build, build_db, build_upload, repository_email, handler
+ {
+ };
+
+ class repository_root: repository_url, handler
{
string root-global-view = "packages"
{
"<service>",
"The default view to display for the global repository root. The
- <service> argument is one of the supported services (\c{packages},
- \c{builds}, \c{submit}, \c{ci}, etc). The default service is
+ <service> argument is one of the supported services (\cb{packages},
+ \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is
packages."
}
string root-tenant-view = "packages"
{
- "<service>"
+ "<service>",
"The default view to display for the tenant repository root. The
- <service> argument is one of the supported services (\c{packages},
- \c{builds}, \c{submit}, \c{ci}, etc). The default service is
+ <service> argument is one of the supported services (\cb{packages},
+ \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is
packages."
}
};
@@ -626,9 +899,14 @@ namespace brep
class build_task
{
- // Package repository canonical name (note: including pkg: type).
+ // Only consider packages from repositories with these canonical names
+ // (note: including pkg: type).
//
vector<string> repository | r;
+
+ // Only consider tenants with this interactive build mode.
+ //
+ bbot::interactive_mode interactive = bbot::interactive_mode::both;
};
class build_result
@@ -661,9 +939,17 @@ namespace brep
//
string version | pv;
+ // Package build target.
+ //
+ string target | tg;
+
+ // Target build configuration.
+ //
+ string target_config | tc;
+
// Package build configuration.
//
- string configuration | cf;
+ string package_config | pc;
// Toolchain name.
//
@@ -697,13 +983,10 @@ namespace brep
//
// https://cppget.org/?builds=bbot
//
- // To support the already distributed URLs the name_legacy (pn) parameter
- // overrides the name (builds) parameter, if present. Note that the
- // builds parameter is renamed to '_' by the root handler (see the
- // request_proxy class for details).
+ // Note that the builds parameter is renamed to '_' by the root handler
+ // (see the request_proxy class for details).
//
string name | _;
- string name_legacy | pn;
// Package version. If empty or *, then no version constraint is applied.
// Otherwise the build package version must match the value exactly.
@@ -714,22 +997,22 @@ namespace brep
// toolchain constraint is applied. Otherwise the build toolchain name
// and version must match the value exactly.
//
- string toolchain | tc = "*";
+ string toolchain | th = "*";
- // Package build configuration name wildcard. An empty value is treated
- // the same way as *.
+ // Package build target wildcard. An empty value is treated the same way
+ // as *.
//
- string configuration | cf;
+ string target | tg;
- // Package build machine name wildcard. An empty value is treated the
- // same way as *.
+ // Package build target configuration name wildcard. An empty value is
+ // treated the same way as *.
//
- string machine | mn;
+ string target_config | tc;
- // Package build target wildcard. An empty value is treated the same way
- // as *.
+ // Package build package configuration name wildcard. An empty value is
+ // treated the same way as *.
//
- string target | tg;
+ string package_config | pc;
// Package build result. If *, then no build result constraint is
// applied. Otherwise the value is supposed to be the one of the
@@ -742,10 +1025,13 @@ namespace brep
class build_configs
{
+ // By default, display all build configurations except those which
+ // belong to the 'hidden' class.
+ //
// Note that the build-configs parameter is renamed to '_' by the root
// handler (see the request_proxy class for details).
//
- string class_name | _ = "all";
+ string class_name | _;
// Display build configurations list starting from this page.
//
@@ -804,9 +1090,53 @@ namespace brep
//
string overrides;
+ // Interactive build execution breakpoint.
+ //
+ string interactive;
+
// Submission simulation outcome.
//
string simulate;
};
+
+  // Parameters other than challenge must all be present.
+ //
+ // Note also that besides these parameters there can be others. We don't
+ // recognize their semantics and just save them to the upload request
+ // manifest.
+ //
+ class upload
+ {
+ // Upload type.
+ //
+ // Note that the upload parameter is renamed to '_' by the root handler
+ // (see the request_proxy class for details).
+ //
+ string type | _;
+
+ // Session id as returned by brep in the task response.
+ //
+ string session;
+
+ // Answer to the private key challenge as posed by brep in the task
+ // response. It must be present only if the challenge value was present
+ // in the task response.
+ //
+ string challenge;
+
+ // Upload instance name.
+ //
+ string instance;
+
+ // Package archive file name. Must be <input type="file"/>.
+ //
+ // Note that it can potentially be not just a name but a file path.
+ //
+ string archive;
+
+ // Package archive file SHA256 checksum.
+ //
+ string sha256sum;
+ };
}
}
diff --git a/mod/module.cxx b/mod/module.cxx
index e0e4de1..c8d0595 100644
--- a/mod/module.cxx
+++ b/mod/module.cxx
@@ -1,5 +1,4 @@
// file : mod/module.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/module.hxx>
@@ -11,10 +10,10 @@
#include <cstring> // strchr()
#include <functional> // bind()
-#include <web/module.hxx>
-#include <web/apache/log.hxx>
+#include <web/server/module.hxx>
+#include <web/server/apache/log.hxx>
-#include <mod/options.hxx>
+#include <mod/module-options.hxx>
using namespace std;
using namespace placeholders; // For std::bind's _1, etc.
@@ -242,23 +241,46 @@ namespace brep
initialized_ = m.initialized_;
}
-// For function func declared like this:
-// using B = std::string (*)(int);
-// using A = B (*)(int,int);
-// A func(B (*)(char),B (*)(wchar_t));
-// __PRETTY_FUNCTION__ looks like this:
-// virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int)
-// ,std::string (* (*)(wchar_t))(int)) const)(int, int))(int)
-//
+ // Here are examples of __PRETTY_FUNCTION__ for some function declarations:
+ //
+ // 1) virtual bool brep::search::handle (web::request&, web::response&);
+ //
+ // virtual bool brep::search::handle(web::request&, web::response&)
+ //
+ // 2) using B = std::string (*) (int);
+ // virtual B brep::search::func ();
+ //
+ // virtual std::string (* brep::search::func())(int)
+ //
+ // 3) using B = std::string (*) (int);
+ // using A = B (*) (int,int);
+ // virtual A brep::search::func (B (*) (char), B (*) (wchar_t));
+ //
+ // virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int), std::string (* (*)(wchar_t))(int)))(int, int))(int)
+ //
+ // 4) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+ // X brep::search::func (std::function<butl::optional<std::string> (char)> (*) (std::function<butl::optional<std::string> (wchar_t)>));
+ //
+ // std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* brep::search::func(std::function<std::optional<std::__cxx11::basic_string<char> >(char)> (*)(std::function<std::optional<std::__cxx11::basic_string<char> >(wchar_t)>)))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+ //
+ // 5) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+ // using Y = X (*) (int);
+ // Y brep::search::func (const char*);
+ //
+ // std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* (* brep::search::func(const char*))(int))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+ //
string handler::
func_name (const char* pretty_name)
{
- const char* e (strchr (pretty_name, ')'));
+ // Position at the last ')' character, which is either the end of the
+ // function's arguments list or the returned function type argument list.
+ //
+ const char* e (strrchr (pretty_name, ')'));
if (e && e > pretty_name)
{
- // Position e at last matching '(' which is the beginning of the
- // argument list..
+ // Position e at the matching '(' character which is the beginning of
+ // the mentioned argument list.
//
size_t d (1);
@@ -274,11 +296,15 @@ namespace brep
if (!d && e > pretty_name)
{
- // Position e at the character following the function name.
+ // Position e at the character which follows the function name.
//
- while (e > pretty_name &&
- (*e != '(' || *(e - 1) == ' ' || *(e - 1) == ')'))
- --e;
+ // Specifically, go further to the left and stop at the '(' character
+      // which is preceded by a character other than ' ', ')', or '>'.
+ //
+ for (char c;
+ e > pretty_name &&
+ !(*e == '(' && (c = *(e - 1)) != ' ' && c != ')' && c != '>');
+ --e) ;
if (e > pretty_name)
{
@@ -407,4 +433,10 @@ namespace brep
else
throw cli::eos_reached ();
}
+
+ size_t handler::name_value_scanner::
+ position ()
+ {
+ return (i_ - name_values_.begin ()) * 2 + (name_ ? 0 : 1);
+ }
}
diff --git a/mod/module.hxx b/mod/module.hxx
index d8ffc5c..f3e062e 100644
--- a/mod/module.hxx
+++ b/mod/module.hxx
@@ -1,18 +1,17 @@
// file : mod/module.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_MODULE_HXX
#define MOD_MODULE_HXX
-#include <web/module.hxx>
+#include <web/server/module.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <mod/utility.hxx>
-#include <mod/options.hxx>
#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
namespace brep
{
@@ -136,6 +135,9 @@ namespace brep
virtual void
skip ();
+ virtual std::size_t
+ position ();
+
private:
const name_values& name_values_;
name_values::const_iterator i_;
@@ -192,7 +194,7 @@ namespace brep
log* log_ {nullptr}; // Diagnostics backend provided by the web server.
private:
- // Extract function name from a __PRETTY_FUNCTION__.
+  // Extract the fully-qualified function name from a __PRETTY_FUNCTION__.
// Throw invalid_argument if fail to parse.
//
static string
diff --git a/mod/options-types.hxx b/mod/options-types.hxx
index 8707f7f..f2b059b 100644
--- a/mod/options-types.hxx
+++ b/mod/options-types.hxx
@@ -1,5 +1,4 @@
// file : mod/options-types.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_OPTIONS_TYPES_HXX
@@ -26,6 +25,19 @@ namespace brep
page_menu () = default;
page_menu (string b, string l): label (move (b)), link (move (l)) {}
};
+
+ enum class build_order
+ {
+ stable,
+ random
+ };
+
+ enum class build_email
+ {
+ none,
+ latest, // Only send emails for the latest package versions.
+ all
+ };
}
#endif // MOD_OPTIONS_TYPES_HXX
diff --git a/mod/page.cxx b/mod/page.cxx
index e34e568..bc2e42d 100644
--- a/mod/page.cxx
+++ b/mod/page.cxx
@@ -1,5 +1,4 @@
// file : mod/page.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/page.hxx>
@@ -8,18 +7,17 @@
#include <cmark-gfm-extension_api.h>
#include <set>
-#include <ios> // hex, uppercase, right
+#include <ios> // hex, uppercase, right
#include <sstream>
-#include <iomanip> // setw(), setfill()
-#include <algorithm> // min(), find()
+#include <iomanip> // setw(), setfill()
+#include <iterator> // back_inserter()
#include <libstudxml/serializer.hxx>
-#include <libbutl/url.mxx>
+#include <web/xhtml/fragment.hxx>
+#include <web/xhtml/serialization.hxx>
-#include <web/xhtml.hxx>
-#include <web/xhtml-fragment.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/server/mime-url-encoding.hxx>
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
@@ -38,6 +36,20 @@ using namespace web::xhtml;
//
namespace brep
{
+ static inline string
+ label_to_class (const string& label)
+ {
+ if (label.find (' ') == string::npos)
+ return label;
+
+ string r;
+ transform (label.begin (), label.end (),
+ back_inserter (r),
+ [] (char c) {return c != ' ' ? c : '-';});
+
+ return r;
+ }
+
// CSS_LINKS
//
static const dir_path css_path ("@");
@@ -125,9 +137,17 @@ namespace brep
void DIV_COUNTER::
operator() (serializer& s) const
{
- s << DIV(ID="count")
- << count_ << " "
- << (count_ % 10 == 1 && count_ % 100 != 11 ? singular_ : plural_)
+ s << DIV(ID="count");
+
+ if (count_)
+ s << *count_;
+ else
+ s << '?';
+
+ s << ' '
+ << (count_ && *count_ % 10 == 1 && *count_ % 100 != 11
+ ? singular_
+ : plural_)
<< ~DIV;
}
@@ -136,7 +156,8 @@ namespace brep
void TR_VALUE::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD << SPAN(CLASS="value") << value_ << ~SPAN << ~TD
<< ~TR;
@@ -147,7 +168,8 @@ namespace brep
void TR_INPUT::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< INPUT(TYPE="text", NAME=name_);
@@ -171,7 +193,8 @@ namespace brep
void TR_SELECT::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SELECT(NAME=name_);
@@ -222,15 +245,9 @@ namespace brep
<< A
<< HREF
<< tenant_dir (root_, tenant_) /
- path (mime_url_encode (name_.string (), false));
-
- // Propagate search criteria to the package details page.
- //
- if (!query_.empty ())
- s << "?q=" << query_;
-
- s << ~HREF
- << name_
+ path (mime_url_encode (name_.string (), false))
+ << ~HREF
+ << name_
<< ~A
<< ~SPAN
<< ~TD
@@ -418,47 +435,75 @@ namespace brep
if (!dependencies_.empty ())
s << "; ";
- for (const auto& d: dependencies_)
+ for (const dependency_alternatives& das: dependencies_)
{
- if (&d != &dependencies_[0])
+ if (&das != &dependencies_[0])
s << ", ";
- if (d.conditional)
- s << "?";
-
- if (d.buildtime)
+ if (das.buildtime)
s << "*";
- // Suppress package name duplicates.
+ // Suppress dependency alternative duplicates, like in
+ // `{foo bar} < 1.1 | {foo bar} > 1.5`.
+ //
+ // Return the dependency package name space-separated list.
//
- set<package_name> names;
- for (const auto& da: d)
- names.emplace (da.name);
+ auto deps_list = [] (const dependency_alternative& da)
+ {
+ string r;
+ for (const dependency& d: da)
+ {
+ if (!r.empty ())
+ r += ' ';
+
+ r += d.name.string ();
+ }
- bool mult (names.size () > 1);
+ return r;
+ };
+
+ set<string> alternatives;
+ for (const dependency_alternative& da: das)
+ alternatives.insert (deps_list (da));
+
+    // Note that we may end up with a single package name in parentheses, if
+    // its duplicates were suppressed. This, however, may be helpful,
+    // indicating that there are some alternatives for the package.
+ //
+ bool mult (das.size () > 1 ||
+ (das.size () == 1 && das[0].size () > 1));
if (mult)
- s << "(";
+ s << '(';
bool first (true);
- for (const auto& da: d)
+ for (const dependency_alternative& da: das)
{
- const package_name& n (da.name);
- if (names.find (n) != names.end ())
- {
- names.erase (n);
+ auto i (alternatives.find (deps_list (da)));
- if (first)
- first = false;
- else
- s << " | ";
+ if (i == alternatives.end ())
+ continue;
+
+ alternatives.erase (i);
+
+ if (!first)
+ s << " | ";
+ else
+ first = false;
+
+ for (const dependency& d: da)
+ {
+ if (&d != &da[0])
+ s << ' ';
// Try to display the dependency as a link if it is resolved.
// Otherwise display it as plain text.
//
- if (da.package != nullptr)
+ const package_name& n (d.name);
+
+ if (d.package != nullptr)
{
- shared_ptr<package> p (da.package.load ());
+ shared_ptr<package> p (d.package.load ());
assert (p->internal () || !p->other_repositories.empty ());
shared_ptr<repository> r (
@@ -481,10 +526,13 @@ namespace brep
else
s << n;
}
+
+ if (da.enable)
+ s << " ?";
}
if (mult)
- s << ")";
+ s << ')';
}
s << ~SPAN
@@ -509,25 +557,25 @@ namespace brep
<< SPAN(CLASS="value")
<< requirements_.size () << "; ";
- for (const auto& r: requirements_)
+ for (const auto& ras: requirements_)
{
- if (&r != &requirements_[0])
+ if (&ras != &requirements_[0])
s << ", ";
- if (r.conditional)
- s << "?";
+ if (ras.buildtime)
+ s << '*';
- if (r.buildtime)
- s << "*";
-
- if (r.empty ())
+ // If this is a simple requirement without id, then print the comment
+ // first word.
+ //
+ if (ras.simple () && ras[0][0].empty ())
{
- // If there is no requirement alternatives specified, then print the
- // comment first word.
- //
- const auto& c (r.comment);
+ const auto& c (ras.comment);
if (!c.empty ())
{
+ if (ras[0].enable)
+ s << "? ";
+
auto n (c.find (' '));
s << string (c, 0, n);
@@ -537,21 +585,31 @@ namespace brep
}
else
{
- bool mult (r.size () > 1);
+ bool mult (ras.size () > 1 ||
+ (ras.size () == 1 && ras[0].size () > 1));
if (mult)
- s << "(";
+ s << '(';
- for (const auto& ra: r)
+ for (const auto& ra: ras)
{
- if (&ra != &r[0])
+ if (&ra != &ras[0])
s << " | ";
- s << ra;
+ for (const string& r: ra)
+ {
+ if (&r != &ra[0])
+ s << ' ';
+
+ s << r;
+ }
+
+ if (ra.enable)
+ s << " ?";
}
if (mult)
- s << ")";
+ s << ')';
}
}
@@ -565,7 +623,8 @@ namespace brep
void TR_URL::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value");
@@ -576,7 +635,7 @@ namespace brep
if (icasecmp (url_.scheme, "https") == 0 ||
icasecmp (url_.scheme, "http") == 0)
{
- butl::url u (url_);
+ url u (url_);
u.scheme.clear ();
s << A(HREF=url_) << u << ~A;
@@ -595,7 +654,8 @@ namespace brep
void TR_EMAIL::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value")
@@ -645,32 +705,22 @@ namespace brep
<< A
<< HREF
<< tenant_dir (root_, tenant_) << "?about#"
- << mime_url_encode (html_id (name_), false)
+ << mime_url_encode (html_id (location_.canonical_name ()), false)
<< ~HREF
- << name_
+ << location_
<< ~A
<< ~SPAN
<< ~TD
<< ~TR;
}
- // TR_LOCATION
- //
- void TR_LOCATION::
- operator() (serializer& s) const
- {
- s << TR(CLASS="location")
- << TH << "location" << ~TH
- << TD << SPAN(CLASS="value") << location_ << ~SPAN << ~TD
- << ~TR;
- }
-
// TR_LINK
//
void TR_LINK::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value") << A(HREF=url_) << text_ << ~A << ~SPAN
@@ -699,8 +749,24 @@ namespace brep
<< TD
<< SPAN(CLASS="value");
+ // Print the ' | ' separator if this is not the first item and reset the
+ // `first` flag to false otherwise.
+ //
+ bool first (true);
+ auto separate = [&s, &first] ()
+ {
+ if (first)
+ first = false;
+ else
+ s << " | ";
+ };
+
if (build_.state == build_state::building)
- s << SPAN(CLASS="building") << "building" << ~SPAN << " | ";
+ {
+ separate ();
+
+ s << SPAN(CLASS="building") << "building" << ~SPAN;
+ }
else
{
// If no unsuccessful operation results available, then print the
@@ -713,7 +779,10 @@ namespace brep
if (build_.results.empty () || *build_.status == result_status::success)
{
assert (build_.status);
- s << SPAN_BUILD_RESULT_STATUS (*build_.status) << " | ";
+
+ separate ();
+
+ s << SPAN_BUILD_RESULT_STATUS (*build_.status);
}
if (!build_.results.empty ())
@@ -721,6 +790,9 @@ namespace brep
for (const auto& r: build_.results)
{
if (r.status != result_status::success)
+ {
+ separate ();
+
s << SPAN_BUILD_RESULT_STATUS (r.status) << " ("
<< A
<< HREF
@@ -728,26 +800,33 @@ namespace brep
<< ~HREF
<< r.operation
<< ~A
- << ") | ";
+ << ")";
+ }
}
+ separate ();
+
s << A
<< HREF << build_log_url (host_, root_, build_) << ~HREF
<< "log"
- << ~A
- << " | ";
+ << ~A;
}
}
- if (build_.force == (build_.state == build_state::building
- ? force_state::forcing
- : force_state::forced))
- s << SPAN(CLASS="pending") << "pending" << ~SPAN;
- else
- s << A
- << HREF << build_force_url (host_, root_, build_) << ~HREF
- << "rebuild"
- << ~A;
+ if (!archived_)
+ {
+ separate ();
+
+ if (build_.force == (build_.state == build_state::building
+ ? force_state::forcing
+ : force_state::forced))
+ s << SPAN(CLASS="pending") << "pending" << ~SPAN;
+ else
+ s << A
+ << HREF << build_force_url (host_, root_, build_) << ~HREF
+ << "rebuild"
+ << ~A;
+ }
s << ~SPAN
<< ~TD
@@ -875,14 +954,16 @@ namespace brep
void DIV_TEXT::
operator() (serializer& s) const
{
- switch (type_)
+ const string& t (text_.text);
+
+ switch (text_.type)
{
case text_type::plain:
{
// To keep things regular we wrap the preformatted text into <div>.
//
s << DIV(ID=id_, CLASS="plain");
- serialize_pre_text (s, text_, length_, url_, "" /* id */);
+ serialize_pre_text (s, t, length_, url_, "" /* id */);
s << ~DIV;
break;
}
@@ -902,9 +983,9 @@ namespace brep
// calls to fail is the inability to allocate memory. Unfortunately,
// instead of reporting the failure to the caller, the API issues
// diagnostics to stderr and aborts the process. Let's decrease the
- // probability of such an event by limiting the text size to 64K.
+ // probability of such an event by limiting the text size to 1M.
//
- if (text_.size () > 64 * 1024)
+ if (t.size () > 1024 * 1024)
{
print_error (what_ + " is too long");
return;
@@ -916,37 +997,38 @@ namespace brep
{
// Parse Markdown into the AST.
//
+ // Note that the footnotes extension needs to be enabled via the
+ // CMARK_OPT_FOOTNOTES flag rather than the
+ // cmark_parser_attach_syntax_extension() function call.
+ //
unique_ptr<cmark_parser, void (*)(cmark_parser*)> parser (
- cmark_parser_new (CMARK_OPT_DEFAULT | CMARK_OPT_VALIDATE_UTF8),
+ cmark_parser_new (CMARK_OPT_DEFAULT |
+ CMARK_OPT_FOOTNOTES |
+ CMARK_OPT_VALIDATE_UTF8),
[] (cmark_parser* p) {cmark_parser_free (p);});
// Enable GitHub extensions in the parser, if requested.
//
- if (type_ == text_type::github_mark)
+ if (text_.type == text_type::github_mark)
{
auto add = [&parser] (const char* ext)
- {
- cmark_syntax_extension* e (
- cmark_find_syntax_extension (ext));
+ {
+ cmark_syntax_extension* e (
+ cmark_find_syntax_extension (ext));
- // Built-in extension is only expected.
- //
- assert (e != nullptr);
+ // Built-in extension is only expected.
+ //
+ assert (e != nullptr);
- cmark_parser_attach_syntax_extension (parser.get (), e);
- };
+ cmark_parser_attach_syntax_extension (parser.get (), e);
+ };
add ("table");
add ("strikethrough");
add ("autolink");
-
- // Somehow feels unsafe (there are some nasty warnings when
- // upstream's tasklist.c is compiled), so let's disable for now.
- //
- // add ("tasklist");
}
- cmark_parser_feed (parser.get (), text_.c_str (), text_.size ());
+ cmark_parser_feed (parser.get (), t.c_str (), t.size ());
unique_ptr<cmark_node, void (*)(cmark_node*)> doc (
cmark_parser_finish (parser.get ()),
diff --git a/mod/page.hxx b/mod/page.hxx
index cba8358..7329e2d 100644
--- a/mod/page.hxx
+++ b/mod/page.hxx
@@ -1,5 +1,4 @@
// file : mod/page.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_PAGE_HXX
@@ -9,7 +8,7 @@
#include <libbbot/manifest.hxx>
-#include <web/xhtml-fragment.hxx>
+#include <web/xhtml/fragment.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
@@ -83,21 +82,24 @@ namespace brep
// Generate counter element.
//
- // It could be redunant to distinguish between singular and plural word forms
- // if it wouldn't be so cheap in English, and phrase '1 Packages' wouldn't
- // look that ugly.
+ // If the count argument is nullopt, then it is assumed that the count is
+ // unknown and the '?' character is printed instead of the number.
+ //
+ // Note that it could be redundant to distinguish between singular and plural
+ // word forms if it wouldn't be so cheap in English, and phrase '1 Packages'
+ // wouldn't look that ugly.
//
class DIV_COUNTER
{
public:
- DIV_COUNTER (size_t c, const char* s, const char* p)
+ DIV_COUNTER (optional<size_t> c, const char* s, const char* p)
: count_ (c), singular_ (s), plural_ (p) {}
void
operator() (xml::serializer&) const;
private:
- size_t count_;
+ optional<size_t> count_;
const char* singular_;
const char* plural_;
};
@@ -194,24 +196,19 @@ namespace brep
const string& tenant_;
};
- // Generate package name element with an optional search criteria. The
- // search string should be url-encoded, if specified.
+ // Generate package name element.
//
class TR_NAME
{
public:
- TR_NAME (const package_name& n,
- const string& q,
- const dir_path& r,
- const string& t)
- : name_ (n), query_ (q), root_ (r), tenant_ (t) {}
+ TR_NAME (const package_name& n, const dir_path& r, const string& t)
+ : name_ (n), root_ (r), tenant_ (t) {}
void
operator() (xml::serializer&) const;
private:
const package_name& name_;
- const string& query_;
const dir_path& root_;
const string& tenant_;
};
@@ -379,13 +376,14 @@ namespace brep
class TR_URL
{
public:
- TR_URL (const url& u, const char* l = "url"): url_ (u), label_ (l) {}
+ TR_URL (const manifest_url& u, const char* l = "url")
+ : url_ (u), label_ (l) {}
void
operator() (xml::serializer&) const;
private:
- const url& url_;
+ const manifest_url& url_;
const char* label_;
};
@@ -424,32 +422,20 @@ namespace brep
class TR_REPOSITORY
{
public:
- TR_REPOSITORY (const string& n, const dir_path& r, const string& t)
- : name_ (n), root_ (r), tenant_ (t) {}
+ TR_REPOSITORY (const repository_location& l,
+ const dir_path& r,
+ const string& t)
+ : location_ (l), root_ (r), tenant_ (t) {}
void
operator() (xml::serializer&) const;
private:
- const string& name_;
+ const repository_location& location_;
const dir_path& root_;
const string& tenant_;
};
- // Generate repository location element.
- //
- class TR_LOCATION
- {
- public:
- TR_LOCATION (const repository_location& l): location_ (l) {}
-
- void
- operator() (xml::serializer&) const;
-
- private:
- const repository_location& location_;
- };
-
// Generate link element.
//
class TR_LINK
@@ -486,14 +472,23 @@ namespace brep
class TR_BUILD_RESULT
{
public:
- TR_BUILD_RESULT (const build& b, const string& h, const dir_path& r):
- build_ (b), host_ (h), root_ (r) {}
+ TR_BUILD_RESULT (const build& b,
+ bool a,
+ const string& h,
+ const dir_path& r):
+ build_ (b), archived_ (a), host_ (h), root_ (r)
+ {
+ // We don't expect a queued build to ever be displayed.
+ //
+ assert (build_.state != build_state::queued);
+ }
void
operator() (xml::serializer&) const;
private:
const build& build_;
+ bool archived_;
const string& host_;
const dir_path& root_;
};
@@ -599,16 +594,14 @@ namespace brep
public:
// Generate a full text element.
//
- DIV_TEXT (const string& t,
- text_type tp,
+ DIV_TEXT (const typed_text& t,
bool st,
const string& id,
const string& what,
const basic_mark& diag)
: text_ (t),
- type_ (tp),
strip_title_ (st),
- length_ (t.size ()),
+ length_ (t.text.size ()),
url_ (nullptr),
id_ (id),
what_ (what),
@@ -618,8 +611,7 @@ namespace brep
// Generate a brief text element.
//
- DIV_TEXT (const string& t,
- text_type tp,
+ DIV_TEXT (const typed_text& t,
bool st,
size_t l,
const string& u,
@@ -627,7 +619,6 @@ namespace brep
const string& what,
const basic_mark& diag)
: text_ (t),
- type_ (tp),
strip_title_ (st),
length_ (l),
url_ (&u),
@@ -641,8 +632,7 @@ namespace brep
operator() (xml::serializer&) const;
private:
- const string& text_;
- text_type type_;
+ const typed_text& text_;
bool strip_title_;
size_t length_;
const string* url_; // Full page url.
diff --git a/mod/services.cxx b/mod/services.cxx
index a50e157..b17e32e 100644
--- a/mod/services.cxx
+++ b/mod/services.cxx
@@ -1,10 +1,9 @@
// file : mod/services.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <ap_config.h> // AP_MODULE_DECLARE_DATA
-#include <web/apache/service.hxx>
+#include <web/server/apache/service.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
new file mode 100644
index 0000000..9205f76
--- /dev/null
+++ b/mod/tenant-service.hxx
@@ -0,0 +1,155 @@
+// file : mod/tenant-service.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_TENANT_SERVICE_HXX
+#define MOD_TENANT_SERVICE_HXX
+
+#include <map>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/build.hxx>
+
+#include <mod/diagnostics.hxx>
+
+namespace brep
+{
+ class tenant_service_base
+ {
+ public:
+ virtual ~tenant_service_base () = default;
+ };
+
+ // Possible build notifications:
+ //
+ // queued
+ // building
+ // built
+ //
+ // Possible transitions:
+ //
+ // -> queued
+ // queued -> building
+ // building -> queued (interrupted & re-queued due to higher priority task)
+ // building -> built
+ // built -> queued (periodic or user-forced rebuild)
+ //
+ // While the implementation tries to make sure the notifications arrive in
+ // the correct order, this is currently done by imposing delays (some
+ // natural, such as building->built, and some artificial, such as
+ // queued->building). As a result, it is unlikely but possible to be notified
+ // about the state transitions in the wrong order, especially if the
+ // notifications take a long time. To minimize the chance of this happening,
+ // the service implementation should strive to batch the queued state
+ // notifications (of which there could be hundreds) in a single request if
+ // at all possible. Also, if supported by the third-party API, it makes
+ // sense for the implementation to protect against overwriting later states
+ // with earlier. For example, if it's possible to place a condition on a
+ // notification, it makes sense to only set the state to queued if none of
+ // the later states (e.g., building) are already in effect.
+ //
+ // Note also that it's possible for the build to get deleted at any stage
+ // without any further notifications. This can happen, for example, due to
+ // data retention timeout or because the build configuration (buildtab
+ // entry) is no longer present. There is no explicit `deleted` transition
+ // notification because such situations (i.e., when a notification sequence
+ // is abandoned half way) are not expected to arise ordinarily in a
+ // properly-configured brep instance. And the third-party service is
+ // expected to deal with them using some overall timeout/expiration
+ // mechanism which it presumably has.
+ //
+ // Each build notification is in its own interface since a service may not
+ // be interested in all of them while computing the information to pass is
+ // expensive.
+
+ class tenant_service_build_queued: public virtual tenant_service_base
+ {
+ public:
+ // If the returned function is not NULL, it is called to update the
+ // service data. It should return the new data or nullopt if no update is
+ // necessary. Note: tenant_service::data passed to the callback and to the
+ // returned function may not be the same. Also, the returned function may
+ // be called multiple times (on transaction retries).
+ //
+ // The passed initial_state indicates the logical initial state and is
+ // either absent, `building` (interrupted), or `built` (rebuild). Note
+ // that all the passed build objects are for the same package version and
+ // have the same initial state.
+ //
+ // The implementation of this and the below functions should normally not
+ // need to make any decisions based on the passed build::state. Rather,
+ // the function name suffix (_queued, _building, _built) signify the
+ // logical end state.
+ //
+ // The build_queued_hints can be used to omit certain components from the
+ // build id. If single_package_version is true, then this tenant contains
+ // a single (non-test) package version and this package name and package
+ // version can be omitted. If single_package_config is true, then the
+ // package version being built only has the default package configuration
+ // and thus it can be omitted.
+ //
+ struct build_queued_hints
+ {
+ bool single_package_version;
+ bool single_package_config;
+ };
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_queued (const tenant_service&,
+ const vector<build>&,
+ optional<build_state> initial_state,
+ const build_queued_hints&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ class tenant_service_build_building: public virtual tenant_service_base
+ {
+ public:
+ virtual function<optional<string> (const tenant_service&)>
+ build_building (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ class tenant_service_build_built: public virtual tenant_service_base
+ {
+ public:
+ virtual function<optional<string> (const tenant_service&)>
+ build_built (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ // Map of service type (tenant_service::type) to service.
+ //
+ using tenant_service_map = std::map<string, shared_ptr<tenant_service_base>>;
+
+ // Every notification callback function that needs to produce any
+ // diagnostics shall begin with:
+ //
+ // NOTIFICATION_DIAG (log_writer);
+ //
+ // This will instantiate the error, warn, info, and trace diagnostics
+ // streams with the function's name.
+ //
+ // Note that a callback function is not expected to throw any exceptions.
+ // This is, in particular, why this macro doesn't instantiate the fail
+ // diagnostics stream.
+ //
+#define NOTIFICATION_DIAG(log_writer) \
+ const basic_mark error (severity::error, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark warn (severity::warning, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark info (severity::info, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark trace (severity::trace, \
+ log_writer, \
+ __PRETTY_FUNCTION__)
+}
+
+#endif // MOD_TENANT_SERVICE_HXX
diff --git a/mod/types-parsers.cxx b/mod/types-parsers.cxx
index dad1c02..f135608 100644
--- a/mod/types-parsers.cxx
+++ b/mod/types-parsers.cxx
@@ -1,13 +1,19 @@
// file : mod/types-parsers.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <mod/types-parsers.hxx>
-#include <mod/options.hxx>
+#include <sstream>
+
+#include <libbutl/regex.hxx>
+#include <libbutl/timestamp.hxx> // from_string()
+
+#include <mod/module-options.hxx>
using namespace std;
+using namespace butl;
using namespace bpkg;
+using namespace bbot;
using namespace web::xhtml;
namespace brep
@@ -51,6 +57,40 @@ namespace brep
parse_path (x, s);
}
+ // Parse time of day.
+ //
+ void parser<duration>::
+ parse (duration& x, bool& xs, scanner& s)
+ {
+ xs = true;
+
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ // To avoid the manual time of day parsing and validation, let's parse
+ // it as the first Epoch day time and convert the result (timestamp) to
+ // the time elapsed since Epoch (duration).
+ //
+ try
+ {
+ string t ("1970-01-01 ");
+ t += v;
+
+ x = from_string (t.c_str (),
+ "%Y-%m-%d %H:%M",
+ false /* local */).time_since_epoch ();
+ return;
+ }
+ catch (const invalid_argument&) {}
+ catch (const system_error&) {}
+
+ throw invalid_value (o, v);
+ }
+
// Parse repository_location.
//
void parser<repository_location>::
@@ -75,6 +115,29 @@ namespace brep
}
}
+ // Parse interactive_mode.
+ //
+ void parser<interactive_mode>::
+ parse (interactive_mode& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+
+ try
+ {
+ x = to_interactive_mode (v);
+ }
+ catch (const invalid_argument&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
// Parse page_form.
//
void parser<page_form>::
@@ -141,10 +204,84 @@ namespace brep
{
x = fragment (v, o);
}
- catch (const xml::parsing&)
+ catch (const xml::parsing& e)
{
- throw invalid_value (o, v);
+ throw invalid_value (o, v, e.what ());
}
}
+
+ // Parse the '/regex/replacement/' string into the regex/replacement pair.
+ //
+ void parser<pair<std::regex, string>>::
+ parse (pair<std::regex, string>& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = regex_replace_parse (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_value (o, v, e.what ());
+ }
+ catch (const regex_error& e)
+ {
+ // Sanitize the description.
+ //
+ ostringstream os;
+ os << e;
+
+ throw invalid_value (o, v, os.str ());
+ }
+ }
+
+ // Parse build_order.
+ //
+ void parser<build_order>::
+ parse (build_order& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "stable")
+ x = build_order::stable;
+ else if (v == "random")
+ x = build_order::random;
+ else
+ throw invalid_value (o, v);
+ }
+
+ // Parse build_email.
+ //
+ void parser<build_email>::
+ parse (build_email& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "none")
+ x = build_email::none;
+ else if (v == "latest")
+ x = build_email::latest;
+ else if (v == "all")
+ x = build_email::all;
+ else
+ throw invalid_value (o, v);
+ }
}
}
diff --git a/mod/types-parsers.hxx b/mod/types-parsers.hxx
index 1d8dbe4..d48ae0b 100644
--- a/mod/types-parsers.hxx
+++ b/mod/types-parsers.hxx
@@ -1,5 +1,4 @@
// file : mod/types-parsers.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
// CLI parsers, included into the generated source files.
@@ -8,9 +7,12 @@
#ifndef MOD_TYPES_PARSERS_HXX
#define MOD_TYPES_PARSERS_HXX
+#include <regex>
+
#include <libbpkg/manifest.hxx> // repository_location
+#include <libbbot/manifest.hxx> // interactive_mode
-#include <web/xhtml-fragment.hxx>
+#include <web/xhtml/fragment.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
@@ -40,6 +42,15 @@ namespace brep
parse (dir_path&, bool&, scanner&);
};
+ // Parse time of day specified in the `hh:mm` form.
+ //
+ template <>
+ struct parser<duration>
+ {
+ static void
+ parse (duration&, bool&, scanner&);
+ };
+
template <>
struct parser<bpkg::repository_location>
{
@@ -48,6 +59,13 @@ namespace brep
};
template <>
+ struct parser<bbot::interactive_mode>
+ {
+ static void
+ parse (bbot::interactive_mode&, bool&, scanner&);
+ };
+
+ template <>
struct parser<page_form>
{
static void
@@ -67,6 +85,27 @@ namespace brep
static void
parse (web::xhtml::fragment&, bool&, scanner&);
};
+
+ template <>
+ struct parser<pair<std::regex, string>>
+ {
+ static void
+ parse (pair<std::regex, string>&, bool&, scanner&);
+ };
+
+ template <>
+ struct parser<build_order>
+ {
+ static void
+ parse (build_order&, bool&, scanner&);
+ };
+
+ template <>
+ struct parser<build_email>
+ {
+ static void
+ parse (build_email&, bool&, scanner&);
+ };
}
}
diff --git a/mod/utility.hxx b/mod/utility.hxx
index beda8c9..43527ae 100644
--- a/mod/utility.hxx
+++ b/mod/utility.hxx
@@ -1,5 +1,4 @@
// file : mod/utility.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#ifndef MOD_UTILITY_HXX
diff --git a/monitor/.gitignore b/monitor/.gitignore
new file mode 100644
index 0000000..21c0e0b
--- /dev/null
+++ b/monitor/.gitignore
@@ -0,0 +1,2 @@
+*-options.?xx
+brep-monitor
diff --git a/monitor/buildfile b/monitor/buildfile
new file mode 100644
index 0000000..dc49a98
--- /dev/null
+++ b/monitor/buildfile
@@ -0,0 +1,45 @@
+# file : monitor/buildfile
+# license : MIT; see accompanying LICENSE file
+
+import libs = libodb%lib{odb}
+import libs += libodb-pgsql%lib{odb-pgsql}
+import libs += libbutl%lib{butl}
+import libs += libbbot%lib{bbot}
+
+include ../libbrep/
+include ../mod/
+
+exe{brep-monitor}: {hxx ixx cxx}{* -*-options} \
+ {hxx ixx cxx}{monitor-options module-options} \
+ ../mod/libue{mod} ../libbrep/lib{brep} $libs
+
+# Build options.
+#
+obj{monitor}: cxx.poptions += -DBREP_COPYRIGHT=\"$copyright\"
+
+# Generated options parser.
+#
+if $cli.configured
+{
+ cli.cxx{monitor-options}: cli{monitor}
+ cli.cxx{module-options}: cli{module}
+
+ cli.options += --std c++11 -I $src_root --include-with-brackets \
+--include-prefix monitor --guard-prefix MONITOR --generate-specifier \
+--cli-namespace brep::cli
+
+ cli.cxx{monitor-options}: cli.options += \
+--page-usage print_ --ansi-color --long-usage
+
+ cli.cxx{module-options}: cli.options += --suppress-usage --generate-parse
+
+ # Include the generated cli files into the distribution and don't remove
+ # them when cleaning in src (so that clean results in a state identical to
+ # distributed).
+ #
+ cli.cxx{*}:
+ {
+ dist = true
+ clean = ($src_root != $out_root)
+ }
+}
diff --git a/monitor/module.cli b/monitor/module.cli
new file mode 100644
index 0000000..c299c5f
--- /dev/null
+++ b/monitor/module.cli
@@ -0,0 +1,16 @@
+// file : monitor/module.cli
+// license : MIT; see accompanying LICENSE file
+
+include <mod/module.cli>;
+
+namespace brep
+{
+ namespace options
+ {
+ // brep web module configuration options we are interested in.
+ //
+ class module: build_task
+ {
+ };
+ }
+}
diff --git a/monitor/monitor.cli b/monitor/monitor.cli
new file mode 100644
index 0000000..3a58a1d
--- /dev/null
+++ b/monitor/monitor.cli
@@ -0,0 +1,208 @@
+// file : monitor/monitor.cli
+// license : MIT; see accompanying LICENSE file
+
+include <vector>;
+include <string>;
+include <cstddef>; // size_t
+include <cstdint>; // uint16_t
+
+include <mod/module.cli>; // Reuse CLI support types.
+
+"\section=1"
+"\name=brep-monitor"
+"\summary=monitor brep infrastructure"
+
+namespace brep
+{
+ namespace options
+ {
+ {
+ "<options> <brep-config> <toolchain> <name> <version>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{brep-monitor --help}\n
+ \b{brep-monitor --version}\n
+ \b{brep-monitor} [<options>] <brep-config> <toolchain> [<toolchain>...]}
+
+ \c{<toolchain> = <name>[\b{/}<version>]}
+
+ \h|DESCRIPTION|
+
+ \cb{brep-monitor} analyzes the \cb{brep} internal state and reports the
+ infrastructure issues printing their descriptions to \cb{stderr}.
+
+ The specified \cb{brep} module configuration file (<brep-config>) is
+ used to retrieve information required to access the databases and
+ deduce the expected behavior. Most of this information can be
+ overridden via the command line options.
+
+ Currently, only delayed package builds for the specified toolchains are
+ reported. If toolchain version is omitted then all package builds with
+ this toolchain name are considered.
+
+ \cb{brep-monitor} maintains its own state in the brep \cb{build}
+ database. In particular, it records timestamps of the reported package
+ build delays and optionally omits them from being reported again during
+ the timeout specified with the \cb{--report-timeout} option. If the
+ timeout is unspecified, then the report timestamps are not updated. To
+ report all delays and still update the timestamps specify the zero
+ report timeout.
+
+ By default, a brief report is printed. Use the \cb{--full-report}
+ option to obtain the full report (which may be large).
+
+ Note that \cb{brep-monitor} expects the \cb{build} database schema to
+ have already been created using \l{brep-migrate(1)}."
+ }
+
+ class monitor
+ {
+ "\h|OPTIONS|"
+
+ std::size_t --soft-rebuild-timeout
+ {
+ "<seconds>",
+ "Time to wait (in seconds) before considering a package soft (re)build as
+ delayed. If unspecified, it is the sum of the package rebuild timeout
+ (soft rebuild timeout if the alternative timeout is unspecified and
+ the maximum of two otherwise) and the build result timeout (see
+ the \cb{build-soft-rebuild-timeout}, \cb{build-alt-soft-rebuild-*},
+ and \cb{build-result-timeout} \cb{brep} module configuration options
+ for details). The special zero value disables monitoring of soft
+ rebuilds.
+
+ Note that if both soft and hard rebuilds are disabled in the
+ \cb{brep} module configuration, then \cb{brep-monitor} is unable to
+ come up with a reasonable build timeout on its own. In this case, to
+ monitor the initial package build delays, you may need to specify
+ either \cb{--soft-rebuild-timeout} or \cb{--hard-rebuild-timeout}
+ explicitly.
+
+ Also note that a package that was not built before it was archived is
+ always considered as delayed. However, to distinguish this case from
+ a situation where a package was archived before a configuration has
+ been added, \cb{brep-monitor} needs to observe the package as
+ buildable for this configuration before it is archived. As a result, if
+ you run \cb{brep-monitor} periodically (for example, as a cron job),
+ then make sure its running period is less than the tenant archive
+ timeout."
+ }
+
+ std::size_t --hard-rebuild-timeout
+ {
+ "<seconds>",
+ "Time to wait (in seconds) before considering a package hard (re)build
+ as delayed. If unspecified, it is calculated in the same way as for
+ \cb{--soft-rebuild-timeout} but using the
+ \cb{build-hard-rebuild-timeout} and \cb{build-alt-hard-rebuild-*}
+ \cb{brep} module configuration options."
+ }
+
+ std::size_t --report-timeout
+ {
+ "<seconds>",
+ "Time to wait (in seconds) before repeating a report of a package
+ build delay. By default there is no timeout and all reports are
+ repeated."
+ }
+
+ bool --full-report
+ {
+ "Print the list of delayed package builds rather than just their number
+ per build configuration."
+ }
+
+ bool --clean
+ {
+ "Additionally clean the monitor state removing outdated information
+ related to non-existent packages, configurations, etc."
+ }
+
+ // Note that the web service would normally log in under a different
+ // user (and potentially switch the role afterwards) and so falling back
+ // to brep's user name and password wouldn't make much sense.
+ //
+ std::string --build-db-user|-u
+ {
+ "<user>",
+ "\cb{build} database user name. If unspecified, then operating system
+ (login) name is used."
+ }
+
+ std::string --build-db-password
+ {
+ "<pass>",
+ "\cb{build} database password. If unspecified, then login without
+ password is expected to work."
+ }
+
+ std::string --build-db-name|-n = "brep_package"
+ {
+ "<name>",
+ "\cb{build} database name. If unspecified, then \cb{brep}'s
+ \cb{build-db-name} configuration option value is used."
+ }
+
+ std::string --build-db-host|-h
+ {
+ "<host>",
+ "\cb{build} database host name, address, or socket. If unspecified,
+ then \cb{brep}'s \cb{build-db-host} configuration option value is
+ used."
+ }
+
+ std::uint16_t --build-db-port|-p
+ {
+ "<port>",
+ "\cb{build} database port number. If unspecified, then \cb{brep}'s
+ \cb{build-db-port} configuration option value is used."
+ }
+
+ std::string --pager // String to allow empty value.
+ {
+ "<path>",
+ "The pager program to be used to show long text. Commonly used pager
+ programs are \cb{less} and \cb{more}. You can also specify additional
+ options that should be passed to the pager program with
+ \cb{--pager-option}. If an empty string is specified as the pager
+ program, then no pager will be used. If the pager program is not
+ explicitly specified, then \cb{brep-monitor} will try to use
+ \cb{less}. If it is not available, then no pager will be used."
+ }
+
+ std::vector<std::string> --pager-option
+ {
+ "<opt>",
+ "Additional option to be passed to the pager program. See \cb{--pager}
+ for more information on the pager program. Repeat this option to
+ specify multiple pager options."
+ }
+
+ bool --help {"Print usage information and exit."}
+ bool --version {"Print version and exit."}
+ };
+
+ "\h|EXIT STATUS|
+
+ \dl|
+
+ \li|\cb{0}
+
+ Success.|
+
+ \li|\cb{1}
+
+ Fatal error.|
+
+ \li|\cb{2}
+
+ An instance of \cb{brep-monitor} or some other \cb{brep} utility is
+ already running. Try again.|
+
+ \li|\cb{3}
+
+ Recoverable database error. Try again.||
+ "
+ }
+}
diff --git a/monitor/monitor.cxx b/monitor/monitor.cxx
new file mode 100644
index 0000000..42d481d
--- /dev/null
+++ b/monitor/monitor.cxx
@@ -0,0 +1,1174 @@
+// file : monitor/monitor.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <map>
+#include <set>
+#include <chrono>
+#include <iostream>
+
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+#include <odb/schema-catalog.hxx>
+
+#include <odb/pgsql/database.hxx>
+
+#include <libbutl/pager.hxx>
+
+#include <libbrep/build.hxx>
+#include <libbrep/common.hxx>
+#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+#include <libbrep/database-lock.hxx>
+
+#include <mod/build-target-config.hxx>
+
+#include <monitor/module-options.hxx>
+#include <monitor/monitor-options.hxx>
+
+using namespace std;
+using namespace butl;
+using namespace odb::core;
+
+namespace brep
+{
+ // Operation failed, diagnostics has already been issued.
+ //
+ struct failed {};
+
+  // We will collect and report build delays as separate steps so as not to hold
+ // database locks while printing to stderr. Also we need to order delays
+ // properly, so while printing reports we could group delays by toolchain
+ // and target configuration.
+ //
+ // To achieve that, we will iterate through all possible package builds
+ // creating the list of delays with the following sort priority:
+ //
+ // 1: toolchain name
+ // 2: toolchain version (descending)
+ // 3: target configuration name
+ // 4: target
+ // 5: tenant
+ // 6: package name
+ // 7: package version (descending)
+ // 8: package configuration name
+ //
+ struct compare_delay
+ {
+ bool
+ operator() (const shared_ptr<const build_delay>& x,
+ const shared_ptr<const build_delay>& y) const
+ {
+ if (int r = x->toolchain_name.compare (y->toolchain_name))
+ return r < 0;
+
+ if (int r = x->toolchain_version.compare (y->toolchain_version))
+ return r > 0;
+
+ if (int r = x->target_config_name.compare (y->target_config_name))
+ return r < 0;
+
+ if (int r = x->target.compare (y->target))
+ return r < 0;
+
+ if (int r = x->tenant.compare (y->tenant))
+ return r < 0;
+
+ if (int r = x->package_name.compare (y->package_name))
+ return r < 0;
+
+ if (int r = x->package_version.compare (y->package_version))
+ return r > 0;
+
+ return x->package_config_name.compare (y->package_config_name) < 0;
+ }
+ };
+
+ // The ordered list of delays to report.
+ //
+ class delay_report
+ {
+ public:
+ // Note that in the brief mode we also need to print the total number of
+ // delays (reported or not) per target configuration. Thus, we add all
+    // delays to the report object, marking whether we need to report them or
+ // not.
+ //
+ void
+ add_delay (shared_ptr<build_delay>, bool custom_bot, bool report);
+
+ bool
+ empty () const {return reported_delay_count_ == 0;}
+
+ // In the brief mode (if full is false) print the number of reported/total
+ // (if total is true) delayed package configuration builds per target
+ // configuration rather than the package configurations themselves.
+ //
+ void
+ print (const char* header, bool total, bool full) const;
+
+ private:
+ // Maps delays to the custom bot and report flag.
+ //
+ struct delay_info
+ {
+ bool custom_bot;
+ bool report;
+ };
+
+ map<shared_ptr<const build_delay>, delay_info, compare_delay> delays_;
+ size_t reported_delay_count_ = 0;
+
+ // Number of reported/total delayed package configurations which need to
+ // be built with the custom build bots.
+ //
+ size_t custom_total_delay_count_ = 0;
+ size_t custom_reported_delay_count_ = 0;
+ };
+
+ void delay_report::
+ add_delay (shared_ptr<build_delay> delay, bool custom_bot, bool report)
+ {
+ delays_.emplace (move (delay), delay_info {custom_bot, report});
+
+ if (custom_bot)
+ ++custom_total_delay_count_;
+
+ if (report)
+ {
+ ++reported_delay_count_;
+
+ if (custom_bot)
+ ++custom_reported_delay_count_;
+ }
+ }
+
+ void delay_report::
+ print (const char* header, bool total, bool full) const
+ {
+ if (empty ())
+ return;
+
+ cerr << header << " (" << reported_delay_count_;
+
+ if (total)
+ cerr << '/' << delays_.size ();
+
+ if (custom_reported_delay_count_ != 0 ||
+ (total && custom_total_delay_count_ != 0))
+ {
+ cerr << " including " << custom_reported_delay_count_;
+
+ if (total)
+ cerr << '/' << custom_total_delay_count_;
+
+ cerr << " custom";
+ }
+
+ cerr << "):" << endl;
+
+ // Group the printed delays by toolchain and target configuration.
+ //
+ const string* toolchain_name (nullptr);
+ const version* toolchain_version (nullptr);
+ const string* target_config_name (nullptr);
+ const target_triplet* target (nullptr);
+
+ size_t config_reported_delay_count (0);
+ size_t config_total_delay_count (0);
+
+ size_t config_custom_reported_delay_count (0);
+ size_t config_custom_total_delay_count (0);
+
+ auto brief_config = [&target_config_name,
+ &target,
+ &config_reported_delay_count,
+ &config_total_delay_count,
+ &config_custom_reported_delay_count,
+ &config_custom_total_delay_count,
+ total] ()
+ {
+ if (target_config_name != nullptr)
+ {
+ assert (target != nullptr);
+
+        // Only print configurations with delays that need to be reported.
+ //
+ if (config_reported_delay_count != 0)
+ {
+ cerr << " " << *target_config_name << '/' << *target << " ("
+ << config_reported_delay_count;
+
+ if (total)
+ cerr << '/' << config_total_delay_count;
+
+ if (config_custom_reported_delay_count != 0 ||
+ (total && config_custom_total_delay_count != 0))
+ {
+ cerr << " including " << config_custom_reported_delay_count;
+
+ if (total)
+ cerr << '/' << config_custom_total_delay_count;
+
+ cerr << " custom";
+ }
+
+ cerr << ')' << endl;
+ }
+
+ config_reported_delay_count = 0;
+ config_total_delay_count = 0;
+
+ config_custom_reported_delay_count = 0;
+ config_custom_total_delay_count = 0;
+ }
+ };
+
+ for (const auto& dr: delays_)
+ {
+ bool report (dr.second.report);
+
+ if (full && !report)
+ continue;
+
+ bool custom_bot (dr.second.custom_bot);
+ const shared_ptr<const build_delay>& d (dr.first);
+
+ // Print the toolchain, if changed.
+ //
+ if (toolchain_name == nullptr ||
+ d->toolchain_name != *toolchain_name ||
+ d->toolchain_version != *toolchain_version)
+ {
+ if (!full)
+ brief_config ();
+
+ if (toolchain_name != nullptr)
+ cerr << endl;
+
+ cerr << " " << d->toolchain_name;
+
+ if (!d->toolchain_version.empty ())
+ cerr << "/" << d->toolchain_version;
+
+ cerr << endl;
+
+ toolchain_name = &d->toolchain_name;
+ toolchain_version = &d->toolchain_version;
+ target_config_name = nullptr;
+ target = nullptr;
+ }
+
+ // Print the configuration, if changed.
+ //
+ if (target_config_name == nullptr ||
+ d->target_config_name != *target_config_name ||
+ d->target != *target)
+ {
+ if (full)
+ {
+ if (target_config_name != nullptr)
+ cerr << endl;
+
+ cerr << " " << d->target_config_name << '/' << d->target << endl;
+ }
+ else
+ brief_config ();
+
+ target_config_name = &d->target_config_name;
+ target = &d->target;
+ }
+
+ // Print the delayed build package configuration in the full report mode
+ // and count configuration builds otherwise.
+ //
+ if (full)
+ {
+ // We can potentially extend this information with the archived flag
+ // or the delay duration.
+ //
+ cerr << " " << d->package_name << '/' << d->package_version
+ << ' ' << d->package_config_name;
+
+ if (custom_bot)
+ cerr << " (custom bot)";
+
+ if (!d->tenant.empty ())
+ cerr << ' ' << d->tenant;
+
+ cerr << endl;
+ }
+ else
+ {
+ if (report)
+ {
+ ++config_reported_delay_count;
+
+ if (custom_bot)
+ ++config_custom_reported_delay_count;
+ }
+
+ ++config_total_delay_count;
+
+ if (custom_bot)
+ ++config_custom_total_delay_count;
+ }
+ }
+
+ if (!full)
+ brief_config ();
+ }
+
+ static const char* help_info (
+ " info: run 'brep-monitor --help' for more information");
+
+ static int
+ main (int argc, char* argv[])
+ try
+ {
+ cli::argv_scanner scan (argc, argv);
+ options::monitor ops (scan);
+
+ // Version.
+ //
+ if (ops.version ())
+ {
+ cout << "brep-monitor " << BREP_VERSION_ID << endl
+ << "libbrep " << LIBBREP_VERSION_ID << endl
+ << "libbbot " << LIBBBOT_VERSION_ID << endl
+ << "libbpkg " << LIBBPKG_VERSION_ID << endl
+ << "libbutl " << LIBBUTL_VERSION_ID << endl
+ << "Copyright (c) " << BREP_COPYRIGHT << "." << endl
+ << "This is free software released under the MIT license." << endl;
+
+ return 0;
+ }
+
+ // Help.
+ //
+ if (ops.help ())
+ {
+ pager p ("brep-monitor help",
+ false,
+ ops.pager_specified () ? &ops.pager () : nullptr,
+ &ops.pager_option ());
+
+ print_usage (p.stream ());
+
+ // If the pager failed, assume it has issued some diagnostics.
+ //
+ return p.wait () ? 0 : 1;
+ }
+
+ // Parse the brep module configuration.
+ //
+ options::module mod_ops;
+ {
+ if (!scan.more ())
+ {
+ cerr << "error: brep module configuration file is expected" << endl
+ << help_info << endl;
+ return 1;
+ }
+
+ string f (scan.next ());
+
+ try
+ {
+ cli::argv_file_scanner scan (f, "" /* option */);
+
+ // Parse the brep module options skipping those we don't recognize.
+ //
+ while (scan.more ())
+ {
+ // Parse until an unknown option is encountered.
+ //
+ mod_ops.parse (scan,
+ cli::unknown_mode::stop,
+ cli::unknown_mode::stop);
+
+ // Skip the unknown option, unless we are done.
+ //
+ if (scan.more ())
+ {
+ // Skip the option name.
+ //
+ size_t l (scan.peek_line ());
+ scan.skip ();
+
+ // Skip the option value, if present.
+ //
+ // Note that here we rely on the configuration file having both
+ // the option name and its value on the same line.
+ //
+ if (scan.more () && scan.peek_line () == l)
+ scan.skip ();
+ }
+ }
+ }
+ catch (const cli::file_io_failure& e)
+ {
+ cerr << "error: unable to parse brep module configuration: " << e
+ << endl;
+ return 1;
+ }
+ catch (const cli::exception& e)
+ {
+ cerr << "error: unable to parse brep module configuration file '" << f
+ << "': " << e << endl;
+ return 1;
+ }
+
+ auto bad_alt = [&f] (const char* what)
+ {
+ cerr << "build-alt-" << what << "-rebuild-start and build-alt-"
+ << what << "-rebuild-stop configuration options must both be "
+ << "either specified or not in '" << f << "'" << endl;
+ };
+
+ if (mod_ops.build_alt_hard_rebuild_start_specified () !=
+ mod_ops.build_alt_hard_rebuild_stop_specified ())
+ {
+ bad_alt("hard");
+ return 1;
+ }
+
+ if (mod_ops.build_alt_soft_rebuild_start_specified () !=
+ mod_ops.build_alt_soft_rebuild_stop_specified ())
+ {
+ bad_alt("soft");
+ return 1;
+ }
+ }
+
+ // Parse the toolchains suppressing duplicates.
+ //
+ // Note that specifying a toolchain both with and without version doesn't
+ // make sense, so we fail if that's the case.
+ //
+ vector<pair<string, version>> toolchains;
+
+ if (!scan.more ())
+ {
+ cerr << "error: toolchain is expected" << endl << help_info << endl;
+ return 1;
+ }
+
+ while (scan.more ())
+ {
+ string s (scan.next ());
+
+ string tn;
+ version tv;
+
+ try
+ {
+ size_t p (s.find ('/'));
+
+ if (p == string::npos)
+ tn = move (s);
+ else
+ {
+ tn.assign (s, 0, p);
+ tv = version (string (s, p + 1));
+ }
+
+ bool dup (false);
+ for (const pair<string, version>& t: toolchains)
+ {
+ if (tn == t.first)
+ {
+ if (tv == t.second)
+ {
+ dup = true;
+ break;
+ }
+
+ if (tv.empty () != t.second.empty ())
+ {
+ cerr << "error: toolchain '" << tn << "' is specified both "
+ << "with and without version" << endl;
+ return 1;
+ }
+ }
+ }
+
+ if (!dup)
+ toolchains.emplace_back (move (tn), move (tv));
+ }
+ catch (const invalid_argument& e)
+ {
+ cerr << "error: invalid toolchain '" << s << "': " << e << endl;
+ return 1;
+ }
+ }
+
+ // Parse buildtab.
+ //
+ if (!mod_ops.build_config_specified ())
+ {
+ cerr << "warning: package building functionality is disabled" << endl;
+ return 0;
+ }
+
+ build_target_configs configs;
+
+ try
+ {
+ configs = bbot::parse_buildtab (mod_ops.build_config ());
+ }
+ catch (const tab_parsing& e)
+ {
+ cerr << "error: unable to parse buildtab: " << e << endl;
+ return 1;
+ }
+ catch (const io_error& e)
+ {
+ cerr << "error: unable to read '" << mod_ops.build_config () << "': "
+ << e << endl;
+ return 1;
+ }
+
+ // Create the database instance.
+ //
+ odb::pgsql::database db (
+ ops.build_db_user (),
+ ops.build_db_password (),
+ (ops.build_db_name_specified ()
+ ? ops.build_db_name ()
+ : mod_ops.build_db_name ()),
+ (ops.build_db_host_specified ()
+ ? ops.build_db_host ()
+ : mod_ops.build_db_host ()),
+ (ops.build_db_port_specified ()
+ ? ops.build_db_port ()
+ : mod_ops.build_db_port ()),
+ "options='-c default_transaction_isolation=serializable'");
+
+ // Prevent several brep utility instances from updating the build database
+ // simultaneously.
+ //
+ database_lock l (db);
+
+ // Check that the database schema matches the current one.
+ //
+ const string ds ("build");
+ if (schema_catalog::current_version (db, ds) != db.schema_version (ds))
+ {
+ cerr << "error: build database schema differs from the current one"
+ << endl
+ << " info: use brep-migrate to migrate the database" << endl;
+ return 1;
+ }
+
+ // If requested, cleanup delays for package builds that are not expected
+ // anymore (build configuration is not present, etc).
+ //
+ if (ops.clean ())
+ {
+ using config_map = map<build_target_config_id,
+ const build_target_config*>;
+
+ config_map conf_map;
+ for (const build_target_config& c: configs)
+ conf_map[build_target_config_id {c.target, c.name}] = &c;
+
+ // Prepare the build delay prepared query.
+ //
+ // Query package build delays in chunks in order not to hold locks for
+ // too long. Sort the result by package version as a first priority to
+ // minimize number of queries to the package database. Note that we
+ // still need to sort by configuration and toolchain to make sure that
+ // build delays are sorted consistently across queries and we don't miss
+ // any of them.
+ //
+ using query = query<build_delay>;
+ using prep_query = prepared_query<build_delay>;
+
+ // Specify the portion.
+ //
+ size_t offset (0);
+
+ query q ("ORDER BY" +
+ query::id.package.tenant + "," +
+ query::id.package.name +
+ order_by_version (query::id.package.version,
+ false /* first */) + "," +
+ query::id.target + "," +
+ query::id.target_config_name + "," +
+ query::id.package_config_name + "," +
+ query::id.toolchain_name +
+ order_by_version (query::id.toolchain_version,
+ false /* first */) +
+ "OFFSET" + query::_ref (offset) + "LIMIT 2000");
+
+ connection_ptr conn (db.connection ());
+
+ prep_query pq (
+ conn->prepare_query<build_delay> ("build-delay-query", q));
+
+ // Cache the delayed build package object to reuse it in case the next
+ // delay refers to the same package (which is often the case due to the
+ // query result sorting criteria we use).
+ //
+ package_id pid;
+ shared_ptr<build_package> p;
+
+ for (bool ne (true); ne; )
+ {
+ transaction t (conn->begin ());
+
+ // Query delays.
+ //
+ auto delays (pq.execute ());
+
+ if ((ne = !delays.empty ()))
+ {
+ // Iterate over the build delays and cleanup the outdated ones.
+ //
+ for (const build_delay& d: delays)
+ {
+ config_map::const_iterator ci;
+
+ bool cleanup (
+ // Check that the toolchain is still used.
+ //
+ find_if (toolchains.begin (), toolchains.end (),
+ [&d] (const pair<string, version>& t)
+ {
+ return t.first == d.toolchain_name &&
+ t.second == d.toolchain_version;
+ }) == toolchains.end () ||
+ //
+ // Check that the build configuration is still present.
+ //
+ (ci = conf_map.find (
+ build_target_config_id {d.target,
+ d.target_config_name})) ==
+ conf_map.end ());
+
+            // Check that the package is still present, is buildable and doesn't
+ // exclude the build configuration.
+ //
+ if (!cleanup)
+ {
+ if (d.id.package != pid)
+ {
+ pid = d.id.package;
+ p = db.find<build_package> (pid);
+ }
+
+ const build_package_config* pc (p != nullptr
+ ? find (d.package_config_name,
+ p->configs)
+ : nullptr);
+
+ cleanup = (pc == nullptr || !p->buildable);
+
+ if (!cleanup)
+ {
+ db.load (*p, p->constraints_section);
+
+ cleanup = exclude (*pc,
+ p->builds,
+ p->constraints,
+ *ci->second,
+ configs.class_inheritance_map);
+ }
+ }
+
+ if (cleanup)
+ db.erase (d);
+ else
+ ++offset;
+ }
+ }
+
+ t.commit ();
+ }
+ }
+
+ delay_report hard_delays_report;
+ delay_report soft_delays_report;
+ set<shared_ptr<const build_delay>, compare_delay> update_delays;
+ {
+ connection_ptr conn (db.connection ());
+
+ // Prepare the buildable package prepared query.
+ //
+ // Query buildable packages in chunks in order not to hold locks for too
+ // long.
+ //
+ using pquery = query<buildable_package>;
+ using prep_pquery = prepared_query<buildable_package>;
+
+ // Specify the portion.
+ //
+ size_t offset (0);
+
+ pquery pq ("ORDER BY" +
+ pquery::build_package::id.tenant + "," +
+ pquery::build_package::id.name +
+ order_by_version (pquery::build_package::id.version,
+ false /* first */) +
+ "OFFSET" + pquery::_ref (offset) + "LIMIT 50");
+
+ prep_pquery ppq (
+ conn->prepare_query<buildable_package> ("buildable-package-query",
+ pq));
+
+ // Prepare the package configuration build prepared queries.
+ //
+ using bquery = query<build>;
+ using prep_bquery = prepared_query<build>;
+
+ build_id id;
+
+ // This query will only be used for toolchains that have no version
+ // specified on the command line to obtain the latest completed build
+ // across all toolchain versions, if present, and the latest incomplete
+ // build otherwise.
+ //
+ // Why don't we pick the latest toolchain version? We don't want to
+      // be stuck with it on the toolchain rollback. Instead we prefer the
+ // toolchain that built the package last and if there are none, pick the
+ // one for which the build task was issued last.
+ //
+ // @@ TMP Check if we can optimize this query by adding index for
+ // soft_timestamp and/or by setting enable_nestloop=off (or some
+ // such) as we do in mod/mod-builds.cxx.
+ //
+ bquery lbq ((equal<build> (bquery::id,
+ id,
+ false /* toolchain_version */) &&
+ bquery::state != "queued") +
+ "ORDER BY" +
+ bquery::soft_timestamp + "DESC, " +
+ bquery::timestamp + "DESC" +
+ "LIMIT 1");
+
+ prep_bquery plbq (
+ conn->prepare_query<build> ("package-latest-build-query", lbq));
+
+ // This query will only be used to retrieve a specific build by id.
+ //
+ bquery bq (equal<build> (bquery::id, id) && bquery::state != "queued");
+ prep_bquery pbq (conn->prepare_query<build> ("package-build-query", bq));
+
+ timestamp now (system_clock::now ());
+
+ // Calculate the build/rebuild expiration time, based on the respective
+ // --{soft,hard}-rebuild-timeout monitor options and the
+ // build-{soft,hard}-rebuild-timeout and
+ // build-alt-{soft,hard}-rebuild-{start,stop,timeout} brep module
+ // configuration options.
+ //
+ // If the --*-rebuild-timeout monitor option is zero or is not specified
+ // and the respective build-*-rebuild-timeout brep's configuration
+ // option is zero, then return timestamp_unknown to indicate 'never
+ // expire'. Note that this value is less than any build timestamp value,
+ // including timestamp_nonexistent.
+ //
+ // NOTE: there is a similar code in mod/mod-build-task.cxx.
+ //
+ auto build_expiration = [&now, &mod_ops] (
+ optional<size_t> rebuild_timeout,
+ const optional<pair<duration, duration>>& alt_interval,
+ optional<size_t> alt_timeout,
+ size_t normal_timeout)
+ {
+ duration t;
+
+ // If the rebuild timeout is not specified explicitly, then calculate
+ // it as the sum of the package rebuild timeout (normal rebuild
+ // timeout if the alternative timeout is unspecified and the maximum
+ // of two otherwise) and the build result timeout.
+ //
+ if (!rebuild_timeout)
+ {
+ if (normal_timeout == 0)
+ return timestamp_unknown;
+
+ chrono::seconds nt (normal_timeout);
+
+ if (alt_interval)
+ {
+ // Calculate the alternative timeout, unless it is specified
+ // explicitly.
+ //
+ if (!alt_timeout)
+ {
+ const duration& start (alt_interval->first);
+ const duration& stop (alt_interval->second);
+
+ // Note that if the stop time is less than the start time then
+ // the interval extends through the midnight.
+ //
+ t = start <= stop ? (stop - start) : ((24h - start) + stop);
+
+              // If the normal rebuild timeout is greater than 24 hours, then
+ // increase the default alternative timeout by (normal - 24h)
+ // (see build-alt-soft-rebuild-timeout configuration option for
+ // details).
+ //
+ if (nt > 24h)
+ t += nt - 24h;
+ }
+ else
+ t = chrono::seconds (*alt_timeout);
+
+ // Take the maximum of the alternative and normal rebuild
+ // timeouts.
+ //
+ if (t < nt)
+ t = nt;
+ }
+ else
+ t = nt;
+
+ // Summarize the rebuild and build result timeouts.
+ //
+ t += chrono::seconds (mod_ops.build_result_timeout ());
+ }
+ else
+ {
+ if (*rebuild_timeout == 0)
+ return timestamp_unknown;
+
+ t = chrono::seconds (*rebuild_timeout);
+ }
+
+ return now - t;
+ };
+
+ timestamp hard_rebuild_expiration (
+ build_expiration (
+ (ops.hard_rebuild_timeout_specified ()
+ ? ops.hard_rebuild_timeout ()
+ : optional<size_t> ()),
+ (mod_ops.build_alt_hard_rebuild_start_specified ()
+ ? make_pair (mod_ops.build_alt_hard_rebuild_start (),
+ mod_ops.build_alt_hard_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (mod_ops.build_alt_hard_rebuild_timeout_specified ()
+ ? mod_ops.build_alt_hard_rebuild_timeout ()
+ : optional<size_t> ()),
+ mod_ops.build_hard_rebuild_timeout ()));
+
+ timestamp soft_rebuild_expiration (
+ build_expiration (
+ (ops.soft_rebuild_timeout_specified ()
+ ? ops.soft_rebuild_timeout ()
+ : optional<size_t> ()),
+ (mod_ops.build_alt_soft_rebuild_start_specified ()
+ ? make_pair (mod_ops.build_alt_soft_rebuild_start (),
+ mod_ops.build_alt_soft_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (mod_ops.build_alt_soft_rebuild_timeout_specified ()
+ ? mod_ops.build_alt_soft_rebuild_timeout ()
+ : optional<size_t> ()),
+ mod_ops.build_soft_rebuild_timeout ()));
+
+ timestamp report_expiration (
+ now - chrono::seconds (ops.report_timeout ()));
+
+ for (bool ne (true); ne; )
+ {
+ transaction t (conn->begin ());
+
+ // Query buildable packages (and cache the result).
+ //
+ auto bps (ppq.execute ());
+
+ if ((ne = !bps.empty ()))
+ {
+ offset += bps.size ();
+
+ for (auto& bp: bps)
+ {
+ shared_ptr<build_package>& p (bp.package);
+
+ db.load (*p, p->constraints_section);
+
+ for (const build_package_config& pc: p->configs)
+ {
+ for (const build_target_config& tc: configs)
+ {
+ // Note that we also don't build a package configuration if we
+ // are unable to assign all the required auxiliary machines
+ // for the build (see mod/mod-build-task.cxx for details).
+ // That means that we will also report delays which happen due
+ // to such an inability, which can potentially be not only
+ // because of the infrastructural problem but also because of
+ // an error in the package manifest (build auxiliary
+ // configuration pattern doesn't match any machine
+ // configuration anymore, etc). It doesn't seem easy to
+ // distinguish here which type of problem causes a delay.
+ // Thus, for now let's wait and see if it ever becomes a
+ // problem.
+ //
+ if (exclude (pc,
+ p->builds,
+ p->constraints,
+ tc,
+ configs.class_inheritance_map))
+ continue;
+
+ for (const pair<string, version>& t: toolchains)
+ {
+ id = build_id (p->id,
+ tc.target, tc.name,
+ pc.name,
+ t.first, t.second);
+
+ // If the toolchain version is unspecified then search for
+ // the latest build across all toolchain versions and search
+ // for a specific build otherwise.
+ //
+ shared_ptr<build> b (id.toolchain_version.empty ()
+ ? plbq.execute_one ()
+ : pbq.execute_one ());
+
+ // Note that we consider a build as delayed if it is not
+ // completed in the expected timeframe. So even if the build
+                  // task has been issued recently we may still consider the
+ // build as delayed.
+ //
+ timestamp bht (b != nullptr
+ ? b->hard_timestamp
+ : timestamp_nonexistent);
+
+ timestamp bst (b != nullptr
+ ? b->soft_timestamp
+ : timestamp_nonexistent);
+
+ // Create the delay object to record a timestamp when the
+ // package configuration build could have potentially been
+ // started, unless it already exists.
+ //
+ shared_ptr<build_delay> d (db.find<build_delay> (id));
+
+ if (d == nullptr)
+ {
+ // If the archived package has no build nor build delay
+ // for this configuration, then we assume that the
+ // configuration was added after the package tenant has
+ // been archived and so the package could have never been
+ // built for this configuration. Thus, we don't consider
+ // this build as delayed and so skip it.
+ //
+ if (bp.archived && b == nullptr)
+ continue;
+
+ // Use the build hard, soft, or status change timestamp
+ // (see the timestamps description for their ordering
+ // information) as the build delay tracking starting point
+ // and fallback to the current time if there is no build
+ // yet.
+ //
+ timestamp pts (b == nullptr ? now :
+ bht != timestamp_nonexistent ? bht :
+ bst != timestamp_nonexistent ? bst :
+ b->timestamp);
+
+ d = make_shared<build_delay> (move (id.package.tenant),
+ move (id.package.name),
+ p->version,
+ move (id.target),
+ move (id.target_config_name),
+ move (id.package_config_name),
+ move (id.toolchain_name),
+ t.second,
+ pts);
+ db.persist (d);
+ }
+
+ // Handle package builds differently based on their tenant's
+ // archive status.
+ //
+ // If the package is not archived then consider it as
+ // delayed if it is not (re-)built by the expiration
+ // time. Otherwise, consider it as delayed if it is unbuilt.
+ //
+ // We also don't need to report an unbuilt archived package
+ // twice, as both soft and hard build delays.
+ //
+ bool hard_delayed;
+ bool soft_delayed;
+
+ if (!bp.archived)
+ {
+ auto delayed = [&d] (timestamp bt, timestamp be)
+ {
+ timestamp t (bt != timestamp_nonexistent
+ ? bt
+ : d->package_timestamp);
+ return t <= be;
+ };
+
+ hard_delayed = delayed (bht, hard_rebuild_expiration);
+ soft_delayed = delayed (bst, soft_rebuild_expiration);
+ }
+ else
+ {
+ hard_delayed = (bst == timestamp_nonexistent);
+ soft_delayed = false;
+ }
+
+ // If there is a delay, then deduce if this package
+ // configuration needs to be built with a custom build bot.
+ //
+ // Note: only meaningful if there is a delay.
+ //
+ bool custom_bot (false);
+
+ if (hard_delayed || soft_delayed)
+ {
+ if (!p->bot_keys_section.loaded ())
+ db.load (*p, p->bot_keys_section);
+
+ custom_bot = !pc.effective_bot_keys (p->bot_keys).empty ();
+ }
+
+ // Add hard/soft delays to the respective reports and
+ // collect the delay for update, if it is reported.
+ //
+ // Note that we update the delay objects persistent state
+ // later, after we successfully print the reports.
+ //
+ bool reported (false);
+
+ if (hard_delayed)
+ {
+ // If the report timeout is zero then report the delay
+ // unconditionally. Otherwise, report the active package
+ // build delay if the report timeout is expired and the
+ // archived package build delay if it was never reported.
+ // Note that fixing the building infrastructure won't help
+ // building an archived package, so reporting its build
+ // delays repeatedly is meaningless.
+ //
+ bool report (
+ ops.report_timeout () == 0 ||
+ (!bp.archived
+ ? d->report_hard_timestamp <= report_expiration
+ : d->report_hard_timestamp == timestamp_nonexistent));
+
+ if (report)
+ {
+ d->report_hard_timestamp = now;
+ reported = true;
+ }
+
+ hard_delays_report.add_delay (d, custom_bot, report);
+ }
+
+ if (soft_delayed)
+ {
+ bool report (ops.report_timeout () == 0 ||
+ d->report_soft_timestamp <= report_expiration);
+
+ if (report)
+ {
+ d->report_soft_timestamp = now;
+ reported = true;
+ }
+
+ soft_delays_report.add_delay (d, custom_bot, report);
+ }
+
+ // If we don't consider the report timestamps for reporting
+ // delays, it seems natural not to update these timestamps
+ // either. Note that reporting all delays and still updating
+ // the report timestamps can be achieved by specifying the
+ // zero report timeout.
+ //
+ if (reported && ops.report_timeout_specified ())
+ update_delays.insert (move (d));
+ }
+ }
+ }
+ }
+ }
+
+ t.commit ();
+ }
+ }
+
+ // Print delay reports, if not empty.
+ //
+ if (!hard_delays_report.empty () || !soft_delays_report.empty ())
+ try
+ {
+ cerr.exceptions (ostream::badbit | ostream::failbit);
+
+ // Don't print the total delay count if the report timeout is zero since
+ // all delays are reported in this case.
+ //
+ bool total (ops.report_timeout () != 0);
+
+ hard_delays_report.print ("Package hard rebuild delays",
+ total,
+ ops.full_report ());
+
+ // Separate reports with an empty line.
+ //
+ if (!hard_delays_report.empty () && !soft_delays_report.empty ())
+ cerr << endl;
+
+ soft_delays_report.print ("Package soft rebuild delays",
+ total,
+ ops.full_report ());
+ }
+ catch (const io_error&)
+ {
+ return 1; // Not much we can do on stderr writing failure.
+ }
+
+ // Persist the delay report timestamps.
+ //
+ if (!update_delays.empty ())
+ {
+ transaction t (db.begin ());
+
+ for (shared_ptr<const build_delay> d: update_delays)
+ db.update (d);
+
+ t.commit ();
+ }
+
+ return 0;
+ }
+ catch (const database_locked&)
+ {
+ cerr << "brep-monitor or some other brep utility is running" << endl;
+ return 2;
+ }
+ catch (const recoverable& e)
+ {
+ cerr << "recoverable database error: " << e << endl;
+ return 3;
+ }
+ catch (const cli::exception& e)
+ {
+ cerr << "error: " << e << endl << help_info << endl;
+ return 1;
+ }
+ catch (const failed&)
+ {
+ return 1; // Diagnostics has already been issued.
+ }
+ // Fully qualified to avoid ambiguity with odb exception.
+ //
+ catch (const std::exception& e)
+ {
+ cerr << "error: " << e << endl;
+ return 1;
+ }
+}
+
+int
+main (int argc, char* argv[])
+{
+ return brep::main (argc, argv);
+}
diff --git a/repositories.manifest b/repositories.manifest
index f6ba123..da9ee2b 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -19,6 +19,10 @@ location: ../libbutl.bash.git##HEAD
:
role: prerequisite
+location: ../bpkg-util.git##HEAD
+
+:
+role: prerequisite
location: https://git.build2.org/packaging/libapr/libapr1.git##HEAD
:
diff --git a/tests/ci/buildfile b/tests/ci/buildfile
index 280e364..6f6e82d 100644
--- a/tests/ci/buildfile
+++ b/tests/ci/buildfile
@@ -1,5 +1,4 @@
# file : tests/ci/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
dir = ../../brep/handler/ci/
diff --git a/tests/ci/ci-dir.testscript b/tests/ci/ci-dir.testscript
index c850df0..be5a9b9 100644
--- a/tests/ci/ci-dir.testscript
+++ b/tests/ci/ci-dir.testscript
@@ -1,5 +1,4 @@
# file : tests/ci/ci-dir.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
.include data.testscript
diff --git a/tests/ci/ci-load.testscript b/tests/ci/ci-load.testscript
index bc0d521..eb9ba7c 100644
--- a/tests/ci/ci-load.testscript
+++ b/tests/ci/ci-load.testscript
@@ -1,5 +1,4 @@
# file : tests/ci/ci-load.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
.include data.testscript
@@ -106,6 +105,14 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
+ bootstrap-build:\\
+ project = libhello
+ %.+
+ \\
+ root-build:\\
+ cxx.std = latest
+ %.+
+ \\
location: libhello
%fragment: .+%
:
@@ -117,6 +124,14 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
+ bootstrap-build:\\
+ project = hello
+ %.+
+ \\
+ root-build:\\
+ cxx.std = latest
+ %.+
+ \\
location: hello
%fragment: .+%
EOE
@@ -149,6 +164,14 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
+ bootstrap-build:\\
+ project = hello
+ %.+
+ \\
+ root-build:\\
+ cxx.std = latest
+ %.+
+ \\
location: hello
%fragment: .+%
EOE
@@ -182,6 +205,14 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
+ bootstrap-build:\\
+ project = libhello
+ %.+
+ \\
+ root-build:\\
+ cxx.std = latest
+ %.+
+ \\
location: libhello
%fragment: .+%
EOE
@@ -201,7 +232,11 @@
%.
reference: $request_id
EOO
- %.*:.*%+
+ %.+cache:cache%
+ : 1
+ %.+
+ : 1
+ %.+
EOE
}
}
diff --git a/tests/ci/data.testscript b/tests/ci/data.testscript
index f584bc6..6f44c85 100644
--- a/tests/ci/data.testscript
+++ b/tests/ci/data.testscript
@@ -1,14 +1,13 @@
# file : tests/ci/data.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Pre-created CI request data directory that will be copied by subsequent
# tests and scope setup commands. The common approach will be that group
-# scopes copy and modify the parent scope submission directory as required
+# scopes copy and modify the parent scope request data directory as required
# by the nested tests and scopes. Tests will also clone the parent scope
-# submission data directory to optionally modify it, use and cleanup at the
-# end. Note that configuration can not be shared between multiple submission
-# handler processes. Also we need to make sure that submission data
+# request data data directory to optionally modify it, use and cleanup at the
+# end. Note that request data directory can not be shared between multiple
+# submission handler processes. Also we need to make sure that request data
# directories are not cloned while being used by submission handler scripts.
#
data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \
@@ -35,10 +34,10 @@ root_data_dir = $~/$data_dir
# The most commonly used submission data directory cloning command that copies
# it from the parent scope working directory.
#
-clone_data = cp --no-cleanup -r ../$data_dir ./
-clone_data_clean = cp --no-cleanup -r ../$data_dir ./ &$data_dir/***
+clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./
+clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/***
# Clones the original submission data directory.
#
-clone_root_data = cp --no-cleanup -r $root_data_dir ./
-clone_root_data_clean = cp --no-cleanup -r $root_data_dir ./ &$data_dir/***
+clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./
+clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/***
diff --git a/tests/load/1/math/libexp-+2-1.2+1.tar.gz b/tests/load/1/math/libexp-+2-1.2+1.tar.gz
index 5beeb84..b223d9f 100644
--- a/tests/load/1/math/libexp-+2-1.2+1.tar.gz
+++ b/tests/load/1/math/libexp-+2-1.2+1.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-+0-X.Y.tar.gz b/tests/load/1/math/libfoo-+0-X.Y.tar.gz
index 6867d4f..95364bb 100644
--- a/tests/load/1/math/libfoo-+0-X.Y.tar.gz
+++ b/tests/load/1/math/libfoo-+0-X.Y.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-1.0.tar.gz b/tests/load/1/math/libfoo-1.0.tar.gz
index 2d445ec..3f23ab9 100644
--- a/tests/load/1/math/libfoo-1.0.tar.gz
+++ b/tests/load/1/math/libfoo-1.0.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-1.2.4+1.tar.gz b/tests/load/1/math/libfoo-1.2.4+1.tar.gz
index 74678eb..db22a19 100644
--- a/tests/load/1/math/libfoo-1.2.4+1.tar.gz
+++ b/tests/load/1/math/libfoo-1.2.4+1.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz b/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz
index 606893a..f1c9ba0 100644
--- a/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz
+++ b/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-examples-1.2.4.tar.gz b/tests/load/1/math/libfoo-examples-1.2.4.tar.gz
index c1d5fbd..00164e6 100644
--- a/tests/load/1/math/libfoo-examples-1.2.4.tar.gz
+++ b/tests/load/1/math/libfoo-examples-1.2.4.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libfoo-tests-1.2.4.tar.gz b/tests/load/1/math/libfoo-tests-1.2.4.tar.gz
index 8e5105f..84a7913 100644
--- a/tests/load/1/math/libfoo-tests-1.2.4.tar.gz
+++ b/tests/load/1/math/libfoo-tests-1.2.4.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libpq-0.tar.gz b/tests/load/1/math/libpq-0.tar.gz
index a689660..d4beb18 100644
--- a/tests/load/1/math/libpq-0.tar.gz
+++ b/tests/load/1/math/libpq-0.tar.gz
Binary files differ
diff --git a/tests/load/1/math/libstudxml-1.0.0+1.tar.gz b/tests/load/1/math/libstudxml-1.0.0+1.tar.gz
index 41c9637..dcf0ee5 100644
--- a/tests/load/1/math/libstudxml-1.0.0+1.tar.gz
+++ b/tests/load/1/math/libstudxml-1.0.0+1.tar.gz
Binary files differ
diff --git a/tests/load/1/math/packages.manifest b/tests/load/1/math/packages.manifest
index de3b1c5..574370a 100644
--- a/tests/load/1/math/packages.manifest
+++ b/tests/load/1/math/packages.manifest
@@ -1,5 +1,5 @@
: 1
-sha256sum: b85ba3a0ba45b98e1fbb2507f199bc4b218a4a413ec6ba4094e214a7507490a2
+sha256sum: 521d17cbd396275aa9eb9b00d456beaaaabae1c004eff6de712bb615c18bb59b
:
name: libexp
version: +2-1.2+1
@@ -19,15 +19,23 @@ builds: default legacy
build-include: windows**d/x86_64**
build-include: windows-vc_13**
build-exclude: **; Only supported on Windows.
+bootstrap-build:\
+project = libexp
+
+\
location: libexp-+2-1.2+1.tar.gz
-sha256sum: 317c8c6f45d9dfdfdef3a823411920cecd51729c7c4f58f9a0b0bbd681c07bd6
+sha256sum: d90cfe583890cd0c05cdfc204e69dd3b986c2da49851f7a87fa0ca870788ff79
:
name: libfoo
version: +0-X.Y
summary: The Foo Library
license: MIT
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-+0-X.Y.tar.gz
-sha256sum: c994fd49f051ab7fb25f3a4e68ca878e484c5d3c2cb132b37d41224b0621b618
+sha256sum: c25e5cae2f72664a3961c3ef88a82e67150c4bcc2a5e1fb4d250e621c5574187
:
name: libfoo
version: 1.0
@@ -37,8 +45,12 @@ build-email: foo-builds@example.com
builds: default legacy; Stable configurations only.
builds: -32; 64-bit targets only
builds: &msvc_13_up; Not too old MSVC.
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.0.tar.gz
-sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76
+sha256sum: 7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624
:
name: libfoo
version: 1.2.4+1
@@ -48,24 +60,39 @@ license: LGPLv2, MIT; If using with GNU TLS.
license: BSD; If using with OpenSSL.
topics: math library, math API, libbaz fork
keywords: c++ foo math best
-description: \
+description:\
A modern C++ library with easy to use linear algebra and lot of optimization
tools.
There are over 100 functions in total with an extensive test suite. The API is
-similar to ~~mathlab~~ **MATLAB**.
+similar to ~~mathlab~~ **MATLAB**.[^mathlab]
Useful for conversion of research code into production environments.
+[^mathlab]: MATLAB Capabilities: TODO
\
description-type: text/markdown
-changes: \
-1.2.4+1
+package-description:\
+This project builds and defines the build2 package for the libfoo library.
+
+A modern C++ library with easy to use linear algebra and lot of optimization
+tools.
+
+There are over 100 functions in total with an extensive test suite. The API is
+similar to ~~mathlab~~ **MATLAB**.[^mathlab]
+
+Useful for conversion of research code into production environments.
+[^mathlab]: MATLAB Capabilities: TODO
+\
+package-description-type: text/markdown
+changes:\
+**1.2.4+1**
* applied patch for critical bug-219
* regenerated documentation
-1.2.4
+**1.2.4**
* test suite extended significantly
\
+changes-type: text/markdown
url: http://www.example.com/foo/; Project home page.
doc-url: http://www.example.org/projects/libfoo/man.xhtml; Documentation page.
src-url: http://scm.example.com/?p=odb/libodb.git\;a=tree; Source tree url.
@@ -74,47 +101,94 @@ email: foo-users@example.com; Public mailing list. Read FAQ before posting.
package-email: pack@example.com; Current packager.
depends: libmisc < 1.1 | libmisc > 2.3.0+0; Crashes with 1.1.0-2.3.0.
depends: libexp >= 1.0
-depends: ? libstudxml | libexpat; The newer the better.
+depends: libstudxml ? ($cxx.target.class == 'windows') | libexpat ?\
+ ($cxx.target.class != 'windows'); The newer the better.
requires: linux | windows | macosx; Symbian support is coming.
requires: c++11
requires: ? ; libc++ standard library if using Clang on Mac OS X.
-requires: ? vc++ >= 12.0; Only if using VC++ on Windows.
-tests: libfoo-tests == 1.2.4
+requires: ; X11 libs.
+requires: ? ($windows); Only 64-bit.
+requires: x86_64 ? ; Only if on Windows.
+requires: * vc++ >= 12.0 ? (windows); Only if using VC++ on Windows.
+requires: host
+tests: * libfoo-tests == 1.2.4 ? (!$defined(config.libfoo_tests.test))\
+ config.libfoo_tests.test=libfoo
examples: libfoo-examples
benchmarks: libfoo-benchmarks > 0.0.1
+builds: all
+network-build-include: windows-vc_14d/x86_64-microsoft-win32-msvc14.0
+network-build-exclude: **
+network-build-config: config.libfoo.network=true; Enable networking API.
+cache-builds: default
+cache-builds: -linux
+cache-build-include: windows-vc_14d/x86_64-microsoft-win32-msvc14.0
+cache-build-exclude: **
+cache-build-config:\
+config.libfoo.cache=true
+config.libfoo.buffer=4096
+;
+Enable caching.
+\
+bootstrap-build:\
+project = libfoo
+
+\
+root-build:\
+config [bool] config.libfoo.network ?= false
+
+config [bool] config.libfoo.cache ?= false
+config [uint64] config.libfoo.buffer ?= 1024
+
+\
location: libfoo-1.2.4+1.tar.gz
-sha256sum: c02b6033107387e05f48aa62ee6498152c967deb0e91a62f1e618fe9fd1bc644
+sha256sum: ffce9d3e3ca9899d3fd6da1f6b93c07cce2c3f6b7004948b59757dae420f801b
:
name: libfoo-benchmarks
version: 1.2.4
summary: The Foo Math Library benchmarks
license: MIT
-builds: none; Is only build to benchmark libfoo.
+builds: 64; Fails building for 32 bits.
+bootstrap-build:\
+project = libfoo-benchmarks
+
+\
location: libfoo-benchmarks-1.2.4.tar.gz
-sha256sum: 2ec3985a540ca5bf74786d0792820cfa8a2790964a5aeaba443dfa91f2a54c04
+sha256sum: 8392db99b1ea0c78fe2c73d8c0ae35f8a31d798c8ed26ebf09b4bf557b4e3ce0
:
name: libfoo-examples
version: 1.2.4
summary: The Foo Math Library examples
license: MIT
-builds: none; Is only built to demo libfoo usage.
+builds: 64; Fails building for 32 bits.
+bootstrap-build:\
+project = libfoo-examples
+
+\
location: libfoo-examples-1.2.4.tar.gz
-sha256sum: 99658b9a5a5b834047b692b93ded9f9af3d255eb5ea3b27594f600b902039995
+sha256sum: de1bf595994a63361262727594de94edbd77fff8234066da74672e44eb4349f2
:
name: libfoo-tests
version: 1.2.4
summary: The Foo Math Library tests
license: MIT
-builds: none; Is only built to test libfoo.
+builds: 64; Fails building for 32 bits.
+bootstrap-build:\
+project = libfoo-tests
+
+\
+root-build:\
+config [strings] config.libfoo_tests.test
+
+\
location: libfoo-tests-1.2.4.tar.gz
-sha256sum: 16712c90df5ba2ffb920d29c9c25a29564f8ae01f167359c4651572789e6cd6c
+sha256sum: 29a97b3356c42602dd81ee2766c242f8974b0a92d8560cb107dd464655d3d527
:
name: libpq
version: 0
summary: PostgreSQL C API client library
license: PostgreSQL License; Permissive free software license.
keywords: postgresql database client library c
-description: \
+description:\
PostgreSQL is an object-relational SQL database management system with libpq
being its C client library. Applications can use this library to pass queries
to the PostgreSQL backend server and to receive the results of those queries
@@ -142,8 +216,12 @@ package-url: https://git.build2.org/cgit/packaging/postgresql/
email: pgsql-general@postgresql.org; Mailing list.
package-email: packaging@build2.org; Mailing list.
requires: build2 >= 0.4.0
+bootstrap-build:\
+project = libpq
+
+\
location: libpq-0.tar.gz
-sha256sum: 75958d000b641c588cdf48e3574584e070104097702dccffdad77947e37f9bd0
+sha256sum: 2aee2bb1d58d51c657903bbab6253c5d4566b6f3f299ba118da24c7756caebfd
:
name: libstudxml
version: 1.0.0+1
@@ -158,5 +236,9 @@ build-warning-email: studxml-warnings@example.com
build-error-email: studxml-errors@example.com
depends: libexpat >= 2.0.0
depends: libgenx
+bootstrap-build:\
+project = libstudxml
+
+\
location: libstudxml-1.0.0+1.tar.gz
-sha256sum: 1833906dd93ccc0cda832d6a1b3ef9ed7877bb9958b46d9b2666033d4a7919c9
+sha256sum: aa52d5b49ee1bad825cd6bca554f72636e8451f93c74f9a443bafce3c2bf82c0
diff --git a/tests/load/1/math/repositories.manifest b/tests/load/1/math/repositories.manifest
index c0293c4..177fcad 100644
--- a/tests/load/1/math/repositories.manifest
+++ b/tests/load/1/math/repositories.manifest
@@ -8,7 +8,7 @@ role: prerequisite
#
email: repoman@dev.cppget.org
summary: Math C++ package repository
-description: \
+description:\
This is the awesome C++ package repository full of remarkable algorithms and
APIs.
\
diff --git a/tests/load/1/misc/packages.manifest b/tests/load/1/misc/packages.manifest
index f02ce01..86620dd 100644
--- a/tests/load/1/misc/packages.manifest
+++ b/tests/load/1/misc/packages.manifest
@@ -15,6 +15,7 @@ depends: libfoo
depends: libmath >= 2.0.0
requires: linux | windows | macosx
changes: some changes
+changes-type: text/plain
location: libbar-2.4.0+3.tar.gz
sha256sum: 70ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
:
@@ -25,7 +26,7 @@ license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
location: libfoo-1.0.tar.gz
-sha256sum: 754cba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
+sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76
:
name: libfoo
version: 0.1
@@ -43,7 +44,7 @@ license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
location: libfoo-1.2.4+1.tar.gz
-sha256sum: 35ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
+sha256sum: 6692a487e0908598e36bdeb9c25ed1e4a35bb99587dbc475807d314fa0719ac6
:
name: libfoo
version: 1.2.4+2
diff --git a/tests/load/1/stable/libfoo-1.0.tar.gz b/tests/load/1/stable/libfoo-1.0.tar.gz
index 2d445ec..3f23ab9 100644
--- a/tests/load/1/stable/libfoo-1.0.tar.gz
+++ b/tests/load/1/stable/libfoo-1.0.tar.gz
Binary files differ
diff --git a/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz b/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz
index aa5665e..1dfff70 100644
--- a/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz
+++ b/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz
Binary files differ
diff --git a/tests/load/1/stable/libfoo-1.2.2.tar.gz b/tests/load/1/stable/libfoo-1.2.2.tar.gz
index 94aca23..22eb89b 100644
--- a/tests/load/1/stable/libfoo-1.2.2.tar.gz
+++ b/tests/load/1/stable/libfoo-1.2.2.tar.gz
Binary files differ
diff --git a/tests/load/1/stable/libfoo-1.2.3+4.tar.gz b/tests/load/1/stable/libfoo-1.2.3+4.tar.gz
index 254f355..76439b0 100644
--- a/tests/load/1/stable/libfoo-1.2.3+4.tar.gz
+++ b/tests/load/1/stable/libfoo-1.2.3+4.tar.gz
Binary files differ
diff --git a/tests/load/1/stable/libfoo-1.2.4.tar.gz b/tests/load/1/stable/libfoo-1.2.4.tar.gz
index dc64431..da70cd3 100644
--- a/tests/load/1/stable/libfoo-1.2.4.tar.gz
+++ b/tests/load/1/stable/libfoo-1.2.4.tar.gz
Binary files differ
diff --git a/tests/load/1/stable/packages.manifest b/tests/load/1/stable/packages.manifest
index 85109f6..f15ab90 100644
--- a/tests/load/1/stable/packages.manifest
+++ b/tests/load/1/stable/packages.manifest
@@ -1,5 +1,5 @@
: 1
-sha256sum: 17ae44db4b176fc7629fe9a6a5e43aa9ab4da0fda2b93f3b1fe6e993ba92cd8b
+sha256sum: 2fe1a38177da668bb79d1912ecb5e935e0a77c984b9db522c9321ca205b2863b
:
name: libfoo
version: 1.0
@@ -9,8 +9,12 @@ build-email: foo-builds@example.com
builds: default legacy; Stable configurations only.
builds: -32; 64-bit targets only
builds: &msvc_13_up; Not too old MSVC.
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.0.tar.gz
-sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76
+sha256sum: 7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624
:
name: libfoo
version: 1.2.2-alpha.1
@@ -23,8 +27,12 @@ email: foo-users@example.com
depends: libmisc [0.1 2.0-) | libmisc [2.0 5.0]
depends: libgenx (0.2 3.0)
depends: libexpat < 5.2 | libexpat (1 5.1]
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.2.2-alpha.1.tar.gz
-sha256sum: f5d3e9e6e8f9621a638b1375d31f0eb50e6279d8066170b25da21e84198cfd82
+sha256sum: 71321f6616036380ac5c9c5dc81efa04b23577ef9dc18f1ce413587bb57677c9
:
name: libfoo
version: 1.2.2
@@ -35,8 +43,12 @@ url: http://www.example.com/foo/
email: foo-users@example.com
depends: libbar <= 2.4.0
depends: libexp == +2-1.2
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.2.2.tar.gz
-sha256sum: 088068ea3d69542a153f829cf836013374763148fba0a43d8047974f58b5efd7
+sha256sum: 75d2a7d3eec62d63afd3d3a84d91bd02b05ecb16cd0907d5b0db1fc654e3753f
:
name: libfoo
version: 1.2.3+4
@@ -47,8 +59,12 @@ keywords: c++ foo
url: http://www.example.com/foo/
email: foo-users@example.com
depends: libmisc >= 2.0.0
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.2.3+4.tar.gz
-sha256sum: f2ebecac6cac8addd7c623bc1becf055e76b13a0d2dd385832b92c38c58956d8
+sha256sum: 24c53899bd4dbfdde6a727e07724984bfb4ca7f20142291c40e30304f15434c3
:
name: libfoo
version: 1.2.4
@@ -59,8 +75,13 @@ description: Very good foo library.
description-type: text/plain
changes: some changes 1
changes: some changes 2
+changes-type: text/plain
url: http://www.example.com/foo/
email: foo-users@example.com
depends: libmisc >= 2.0.0
+bootstrap-build:\
+project = libfoo
+
+\
location: libfoo-1.2.4.tar.gz
-sha256sum: aa1606323bfc59b70de642629dc5d8318cc5348e3646f90ed89406d975db1e1d
+sha256sum: 98f80ca0cd1c053fd45ab37f72a6a31f1a0304747c636822df8d573420284642
diff --git a/tests/load/1/stable/repositories.manifest b/tests/load/1/stable/repositories.manifest
index 49a0685..1907ed6 100644
--- a/tests/load/1/stable/repositories.manifest
+++ b/tests/load/1/stable/repositories.manifest
@@ -14,35 +14,36 @@ role: prerequisite
email: repoman@dev.cppget.org; public mailing list
summary: General C++ package stable repository
description: This is the awesome C++ package repository full of exciting stuff.
-certificate: \
+certificate:\
-----BEGIN CERTIFICATE-----
-MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV
-BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn
-MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k
-ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G
-CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e
-1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O
-bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u
-i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8
-lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i
-C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426
-crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV
-BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+
-7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF
-1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8
-OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ
-5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww
-CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq
-hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT
-mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq
-pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3
-Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz
-EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ
-SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr
-GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj
-V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3
-w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd
-VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X
-EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw=
+MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL
+BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2
+LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw
+FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0
+Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H
+eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE
+T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe
+BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75
+qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD
+QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip
+Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e
+inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ
+SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi
+j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko
+ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm
+lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW
+BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn
+ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX
+1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ
+8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR
+/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa
+uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6
+N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k
+jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC
+qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl
+yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel
+alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC
+fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J
+BGQjsz7h0bI3fw==
-----END CERTIFICATE-----
\ \ No newline at end of file
diff --git a/tests/load/1/stable/signature.manifest b/tests/load/1/stable/signature.manifest
index 45d160e..bb18e13 100644
--- a/tests/load/1/stable/signature.manifest
+++ b/tests/load/1/stable/signature.manifest
@@ -1,13 +1,13 @@
: 1
-sha256sum: f4fadfdc3586c8b94dce871328625f43636e43b4a073c4ee577af41780eeb8f3
-signature: \
-J40HIcLQQI+1rzXzRo0OOJRk1NTmLNNFk+RUi/NM7M6vUFjjq4nIABR+bg8RdenpybVASTyJYNTL
-/pLStEF+hg5SnufsYvpJJmstBMY4JXAcOfngUz4UVe8QS9zu3YGL5y/MBnYU+SKBbHnjRsTbRgak
-LjoVbTKH6kkPkZ8MZMXzo/0un2L4w4uZLNDeqjJBg2OFztPIV3RYzBdCcxJ+k20UJjVfmNQWaBki
-eaXVIsOxQ1bqWfg5uvACYvSgAIB+6ZIQFf8VYAJvnSieTswyWTxBXvj0D6aNnksQisaSbsRUuVb/
-zKYNWDrZFOjlNLEt+3bv3WQ5r0RqeRu7x9tdlXMfX/z9qu3SRe/mmaEpArkO67OKuVW1EDIo4Pwd
-6zOQZUBvy3wd5RJWCfEbcuIBNBWL3jWOhWK36V9NzCWzeDzh22D0P9WEYNGd9Xn0+GeXpuiD/1w1
-IkVy9FA/2DYBb/UgNvWLaGWeTFqmv+ZcLKs8uuNHoM33EpI2jQetDS24QeOmqImmRsLROjyCxVye
-yU3Dew5HRLK3KxNtC5XewK24eEdAQID7UEesn7X/MYfuyS1hq415YUibZswG7UIChj082xKHHcME
-zrEgdJ29+I3ZQkpTBaY/77Pu6UFvcpPbak0SCc6FHPvWKwXHPv2ATsInKs0J/WLdsL8sgaM2f3I=
+sha256sum: defa57373e20beb8f22a391b1e97ec7f5e50469e050c2551e8522fab74f304e1
+signature:\
+f9b/Q+mBos6MwwFPIiIBqSEidqO+rMsktQ1ESWEkO48uHN+hjNCog0gp3Vf4QHj0p2KKU5Uz4exj
+8h7R3RB0F4B10/lDyHw4XlvAyP1uE+YS76rEXHXuGBEnGvBK9818WkCJ5yfFJYg3AuGt2Cyd3QHF
+Uxv+fDkI05KrZNGzLo9euDr6yhHOMTjwSntu/lt6ytfyzTFHcs0xOM03qEtszD5QrNdC32z7kmDE
+8ISUlMUyqOjsz8h25F04NyiojccGTpfUTgqA2zXqMAwRn+fG9wU5Vwnau/oIcAO+nUruR4i1VrDd
+D5q/gjbOpcBTt7bmbVInR5glbgdPZ7r7gpqfOVwybxeTrArj72jA/XmmYyZlaTTW1RXcJuRIWAcP
+2Z61O+cwP9CqW8ktQDNGkgDxxXJR7aEG64G8q7uZeb6v1FaQCwo2JEe/Tv0JDp+DBVKwBm7ZDZi/
+TVtltbADgISCU8bTVz/r4Q0qwHeiQo2GV+Di4h7KvWS8H2Q7sjpyWrI3/UzujOp+zB/BP+6dNZTr
+6Mf8CJ+9L5YY4lzX9jeVQLOuKOqLLZWD2VQiyaYZp79X5OtSHuNvCWcaWUa9Rpu/goLCPrk6QHD+
+wUTYcIdsEbX2jDN3YQwe53WklytPbMxy7taRF1obpGOpDDju3InD3IRXS7ch4G9XzqtsylzMIeE=
\
diff --git a/tests/load/1/testing/packages.manifest b/tests/load/1/testing/packages.manifest
index a903878..2d458f0 100644
--- a/tests/load/1/testing/packages.manifest
+++ b/tests/load/1/testing/packages.manifest
@@ -1,5 +1,5 @@
: 1
-sha256sum: 5f2b297be1eafd70fe55f179a0cf062baf8405e08b3854600801420132a206b1
+sha256sum: fac618ab9d8132777a7d2f10e338266957de02b87f233714bce2331e692f4ab6
:
name: libmisc
version: 2.4.0
diff --git a/tests/load/buildfile b/tests/load/buildfile
index 61ec037..315b59e 100644
--- a/tests/load/buildfile
+++ b/tests/load/buildfile
@@ -1,5 +1,4 @@
# file : tests/load/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
import libs += libbpkg%lib{bpkg}
diff --git a/tests/load/cert b/tests/load/cert
index 893d84d..c2da7fa 100755
--- a/tests/load/cert
+++ b/tests/load/cert
@@ -6,17 +6,18 @@
# Copy cert.pem content to the certificate value of the following manifest
# files:
-# 1/stable/repositories
-# pkg/1/dev.cppget.org/signed/repositories
+# 1/stable/repositories.manifest
+# pkg/1/dev.cppget.org/signed/repositories.manifest
#
-openssl req -x509 -new -key key.pem -days 1825 -config openssl.cnf > cert.pem
+openssl req -x509 -new -key key.pem -days 3650 -config openssl.cnf > cert.pem
# To regenerate the packages and signature manifest files run:
#
-# ../../../bpkg/bpkg/bpkg rep-create 1/stable --key key.pem
-# ../../../bpkg/bpkg/bpkg rep-create pkg/1/dev.cppget.org/signed --key key.pem
+# bpkg rep-create 1/math
+# bpkg rep-create 1/stable --key key.pem
+# bpkg rep-create pkg/1/dev.cppget.org/signed --key key.pem
#
# Update certificate fingerprint in loadtab for dev.cppget.org/signed
# repository. To print the fingerprint run:
#
-# ../../../bpkg/bpkg/bpkg rep-info --cert-fingerprint pkg/1/dev.cppget.org/signed/
+# bpkg rep-info --cert-fingerprint pkg/1/dev.cppget.org/signed/
diff --git a/tests/load/cert.pem b/tests/load/cert.pem
index dc7c756..13a55f4 100644
--- a/tests/load/cert.pem
+++ b/tests/load/cert.pem
@@ -1,30 +1,31 @@
-----BEGIN CERTIFICATE-----
-MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV
-BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn
-MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k
-ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G
-CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e
-1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O
-bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u
-i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8
-lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i
-C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426
-crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV
-BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+
-7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF
-1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8
-OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ
-5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww
-CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq
-hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT
-mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq
-pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3
-Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz
-EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ
-SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr
-GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj
-V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3
-w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd
-VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X
-EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw=
+MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL
+BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2
+LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw
+FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0
+Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H
+eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE
+T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe
+BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75
+qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD
+QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip
+Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e
+inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ
+SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi
+j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko
+ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm
+lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW
+BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn
+ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX
+1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ
+8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR
+/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa
+uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6
+N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k
+jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC
+qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl
+yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel
+alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC
+fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J
+BGQjsz7h0bI3fw==
-----END CERTIFICATE-----
diff --git a/tests/load/driver.cxx b/tests/load/driver.cxx
index d685c18..0f2c8de 100644
--- a/tests/load/driver.cxx
+++ b/tests/load/driver.cxx
@@ -1,18 +1,16 @@
// file : tests/load/driver.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <iostream>
#include <exception>
-#include <algorithm> // sort(), find()
#include <odb/session.hxx>
#include <odb/transaction.hxx>
#include <odb/pgsql/database.hxx>
-#include <libbutl/process.mxx>
-#include <libbutl/filesystem.mxx>
+#include <libbutl/process.hxx>
+#include <libbutl/filesystem.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
@@ -20,6 +18,9 @@
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
+#undef NDEBUG
+#include <cassert>
+
using std::cerr;
using std::endl;
@@ -28,7 +29,6 @@ using namespace butl;
using namespace brep;
using labels = small_vector<string, 5>;
-using req_alts = small_vector<string, 1>;
static const path packages ("packages.manifest");
static const path repositories ("repositories.manifest");
@@ -38,7 +38,7 @@ check_location (shared_ptr<package>& p)
{
if (p->internal ())
return p->location && *p->location ==
- path (p->name.string () + "-" + p->version.string () + ".tar.gz");
+ path (p->name.string () + '-' + p->version.string () + ".tar.gz");
else
return !p->location;
}
@@ -57,7 +57,7 @@ check_external (const package& p)
!p.internal () &&
p.other_repositories.size () > 0 &&
p.priority == priority () &&
- p.changes.empty () &&
+ !p.changes &&
p.license_alternatives.empty () &&
p.dependencies.empty () &&
p.requirements.empty () &&
@@ -205,13 +205,13 @@ main (int argc, char* argv[])
static inline dependency
dep (const char* n, optional<version_constraint> c)
{
- return dependency {package_name (n), move (c), nullptr};
+ return dependency {package_name (n), move (c), nullptr /* package */};
}
static inline version
dep_ver (const char* v)
{
- return version (v, false /* fold_zero_revision */);
+ return version (v, version::none);
}
static void
@@ -268,7 +268,7 @@ test_git_repos (const cstrings& loader_args,
assert (p->dependencies.size () == 1);
assert (p->dependencies[0].size () == 1);
- assert (p->dependencies[0][0] ==
+ assert (p->dependencies[0][0][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("1.0"), false, dep_ver ("1.0"), false)));
@@ -384,7 +384,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpvxy->other_repositories.empty ());
assert (fpvxy->priority == priority::low);
- assert (fpvxy->changes.empty ());
+ assert (!fpvxy->changes);
assert (fpvxy->license_alternatives.size () == 1);
assert (fpvxy->license_alternatives[0].size () == 1);
@@ -396,7 +396,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpvxy));
assert (fpvxy->sha256sum && *fpvxy->sha256sum ==
- "c994fd49f051ab7fb25f3a4e68ca878e484c5d3c2cb132b37d41224b0621b618");
+ "c25e5cae2f72664a3961c3ef88a82e67150c4bcc2a5e1fb4d250e621c5574187");
assert (fpvxy->buildable);
@@ -420,7 +420,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv1->other_repositories[1].load () == cr);
assert (fpv1->priority == priority::low);
- assert (fpv1->changes.empty ());
+ assert (!fpv1->changes);
assert (fpv1->license_alternatives.size () == 1);
assert (fpv1->license_alternatives[0].size () == 1);
@@ -432,7 +432,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpv1));
assert (fpv1->sha256sum && *fpv1->sha256sum ==
- "e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76");
+ "7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624");
assert (fpv1->buildable);
@@ -454,7 +454,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv2->internal_repository.load () == sr);
assert (fpv2->other_repositories.empty ());
assert (fpv2->priority == priority::low);
- assert (fpv2->changes.empty ());
+ assert (!fpv2->changes);
assert (fpv2->license_alternatives.size () == 1);
assert (fpv2->license_alternatives[0].size () == 1);
@@ -464,12 +464,12 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv2->dependencies[0].size () == 1);
assert (fpv2->dependencies[1].size () == 1);
- assert (fpv2->dependencies[0][0] ==
+ assert (fpv2->dependencies[0][0][0] ==
dep ("libbar",
version_constraint (
nullopt, true, dep_ver ("2.4.0"), false)));
- assert (fpv2->dependencies[1][0] ==
+ assert (fpv2->dependencies[1][0][0] ==
dep ("libexp",
version_constraint (
dep_ver ("+2-1.2"), false, dep_ver ("+2-1.2"), false)));
@@ -477,7 +477,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpv2));
assert (fpv2->sha256sum && *fpv2->sha256sum ==
- "088068ea3d69542a153f829cf836013374763148fba0a43d8047974f58b5efd7");
+ "75d2a7d3eec62d63afd3d3a84d91bd02b05ecb16cd0907d5b0db1fc654e3753f");
assert (!fpv2->buildable);
@@ -500,7 +500,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv2a->internal_repository.load () == sr);
assert (fpv2a->other_repositories.empty ());
assert (fpv2a->priority == priority::security);
- assert (fpv2a->changes.empty ());
+ assert (!fpv2a->changes);
assert (fpv2a->license_alternatives.size () == 1);
assert (fpv2a->license_alternatives[0].size () == 1);
@@ -511,27 +511,27 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv2a->dependencies[1].size () == 1);
assert (fpv2a->dependencies[2].size () == 2);
- assert (fpv2a->dependencies[0][0] ==
+ assert (fpv2a->dependencies[0][0][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("0.1"), false, dep_ver ("2.0.0-"), true)));
- assert (fpv2a->dependencies[0][1] ==
+ assert (fpv2a->dependencies[0][1][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("2.0"), false, dep_ver ("5.0"), false)));
- assert (fpv2a->dependencies[1][0] ==
+ assert (fpv2a->dependencies[1][0][0] ==
dep ("libgenx",
version_constraint (
dep_ver ("0.2"), true, dep_ver ("3.0"), true)));
- assert (fpv2a->dependencies[2][0] ==
+ assert (fpv2a->dependencies[2][0][0] ==
dep ("libexpat",
version_constraint (
nullopt, true, dep_ver ("5.2"), true)));
- assert (fpv2a->dependencies[2][1] ==
+ assert (fpv2a->dependencies[2][1][0] ==
dep ("libexpat",
version_constraint (
dep_ver ("1"), true, dep_ver ("5.1"), false)));
@@ -541,7 +541,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpv2a));
assert (fpv2a->sha256sum && *fpv2a->sha256sum ==
- "f5d3e9e6e8f9621a638b1375d31f0eb50e6279d8066170b25da21e84198cfd82");
+ "71321f6616036380ac5c9c5dc81efa04b23577ef9dc18f1ce413587bb57677c9");
assert (!fpv2a->buildable);
@@ -563,7 +563,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv3->other_repositories.empty ());
assert (fpv3->priority == priority::medium);
- assert (fpv3->changes.empty ());
+ assert (!fpv3->changes);
assert (fpv3->license_alternatives.size () == 1);
assert (fpv3->license_alternatives[0].size () == 1);
@@ -571,7 +571,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv3->dependencies.size () == 1);
assert (fpv3->dependencies[0].size () == 1);
- assert (fpv3->dependencies[0][0] ==
+ assert (fpv3->dependencies[0][0][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("2.0.0"), false, nullopt, true)));
@@ -579,7 +579,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpv3));
assert (fpv3->sha256sum && *fpv3->sha256sum ==
- "f2ebecac6cac8addd7c623bc1becf055e76b13a0d2dd385832b92c38c58956d8");
+ "24c53899bd4dbfdde6a727e07724984bfb4ca7f20142291c40e30304f15434c3");
assert (!fpv3->buildable);
@@ -591,7 +591,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv4->summary == "The Foo Library");
assert (fpv4->keywords == labels ({"c++", "foo"}));
- assert (*fpv4->description == "Very good foo library.");
+ assert (fpv4->description->text == "Very good foo library.");
assert (fpv4->url && fpv4->url->string () == "http://www.example.com/foo/");
assert (!fpv4->package_url);
assert (fpv4->email && *fpv4->email == "foo-users@example.com");
@@ -600,7 +600,10 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv4->internal_repository.load () == sr);
assert (fpv4->other_repositories.empty ());
assert (fpv4->priority == priority::low);
- assert (fpv4->changes == "some changes 1\n\nsome changes 2");
+
+ assert (fpv4->changes &&
+ fpv4->changes->text == "some changes 1\n\nsome changes 2" &&
+ fpv4->changes->type == text_type::plain);
assert (fpv4->license_alternatives.size () == 1);
assert (fpv4->license_alternatives[0].comment ==
@@ -610,7 +613,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv4->dependencies.size () == 1);
assert (fpv4->dependencies[0].size () == 1);
- assert (fpv4->dependencies[0][0] ==
+ assert (fpv4->dependencies[0][0][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("2.0.0"), false, nullopt, true)));
@@ -618,7 +621,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (fpv4));
assert (fpv4->sha256sum && *fpv4->sha256sum ==
- "aa1606323bfc59b70de642629dc5d8318cc5348e3646f90ed89406d975db1e1d");
+ "98f80ca0cd1c053fd45ab37f72a6a31f1a0304747c636822df8d573420284642");
assert (!fpv4->buildable);
@@ -686,7 +689,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (xpv->internal_repository.load () == mr);
assert (xpv->other_repositories.empty ());
assert (xpv->priority == priority::low);
- assert (xpv->changes.empty ());
+ assert (!xpv->changes);
assert (xpv->license_alternatives.size () == 1);
assert (xpv->license_alternatives[0].size () == 1);
@@ -694,20 +697,20 @@ test_pkg_repos (const cstrings& loader_args,
assert (xpv->dependencies.size () == 2);
assert (xpv->dependencies[0].size () == 1);
- assert (xpv->dependencies[0][0] ==
+ assert (xpv->dependencies[0][0][0] ==
dep ("libexpat",
version_constraint (
dep_ver ("2.0.0"), false, nullopt, true)));
assert (xpv->dependencies[1].size () == 1);
- assert (xpv->dependencies[1][0] == dep ("libgenx", nullopt));
+ assert (xpv->dependencies[1][0][0] == dep ("libgenx", nullopt));
assert (xpv->requirements.empty ());
assert (check_location (xpv));
assert (xpv->sha256sum && *xpv->sha256sum ==
- "1833906dd93ccc0cda832d6a1b3ef9ed7877bb9958b46d9b2666033d4a7919c9");
+ "aa52d5b49ee1bad825cd6bca554f72636e8451f93c74f9a443bafce3c2bf82c0");
assert (xpv->buildable);
@@ -723,12 +726,28 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv5->topics ==
labels ({"math library", "math API", "libbaz fork"}));
assert (fpv5->keywords == labels ({"c++", "foo", "math", "best"}));
- assert (*fpv5->description ==
+
+ assert (fpv5->description->text ==
"A modern C++ library with easy to use linear algebra and lot "
"of optimization\ntools.\n\nThere are over 100 functions in "
"total with an extensive test suite. The API is\nsimilar to "
- "~~mathlab~~ **MATLAB**.\n\nUseful for conversion of research "
- "code into production environments.");
+ "~~mathlab~~ **MATLAB**.[^mathlab]\n\nUseful for conversion of "
+ "research code into production environments.\n"
+ "[^mathlab]: MATLAB Capabilities: TODO");
+
+ assert (fpv5->description->type == text_type::github_mark);
+
+ assert (fpv5->package_description->text ==
+ "This project builds and defines the build2 package for the "
+ "libfoo library.\n\n"
+ "A modern C++ library with easy to use linear algebra and lot "
+ "of optimization\ntools.\n\nThere are over 100 functions in "
+ "total with an extensive test suite. The API is\nsimilar to "
+ "~~mathlab~~ **MATLAB**.[^mathlab]\n\nUseful for conversion of "
+ "research code into production environments.\n"
+ "[^mathlab]: MATLAB Capabilities: TODO");
+
+ assert (fpv5->package_description->type == text_type::github_mark);
assert (fpv5->url && fpv5->url->string () == "http://www.example.com/foo/");
@@ -754,14 +773,16 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv5->priority.comment ==
"Critical bug fixes, performance improvement.");
- const char ch[] = R"DLM(1.2.4+1
+ const char ch[] = R"DLM(**1.2.4+1**
* applied patch for critical bug-219
* regenerated documentation
-1.2.4
+**1.2.4**
* test suite extended significantly)DLM";
- assert (fpv5->changes == ch);
+ assert (fpv5->changes &&
+ fpv5->changes->text == ch &&
+ fpv5->changes->type == text_type::github_mark);
assert (fpv5->license_alternatives.size () == 2);
assert (fpv5->license_alternatives[0].comment ==
@@ -779,12 +800,12 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv5->dependencies[0].comment ==
"Crashes with 1.1.0-2.3.0.");
- assert (fpv5->dependencies[0][0] ==
+ assert (fpv5->dependencies[0][0][0] ==
dep ("libmisc",
version_constraint (
nullopt, true, dep_ver ("1.1"), true)));
- assert (fpv5->dependencies[0][1] ==
+ assert (fpv5->dependencies[0][1][0] ==
dep ("libmisc",
version_constraint (
dep_ver ("2.3.0+0"), true, nullopt, true)));
@@ -792,7 +813,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv5->dependencies[1].size () == 1);
assert (fpv5->dependencies[1].comment.empty ());
- assert (fpv5->dependencies[1][0] ==
+ assert (fpv5->dependencies[1][0][0] ==
dep ("libexp",
version_constraint (
dep_ver ("1.0"), false, nullopt, true)));
@@ -800,33 +821,48 @@ test_pkg_repos (const cstrings& loader_args,
assert (fpv5->dependencies[2].size () == 2);
assert (fpv5->dependencies[2].comment == "The newer the better.");
- assert (fpv5->dependencies[2][0] == dep ("libstudxml", nullopt));
- assert (fpv5->dependencies[2][1] == dep ("libexpat", nullopt));
+ assert (fpv5->dependencies[2][0][0] == dep ("libstudxml", nullopt));
+ assert (fpv5->dependencies[2][1][0] == dep ("libexpat", nullopt));
requirements& fpvr5 (fpv5->requirements);
- assert (fpvr5.size () == 4);
+ assert (fpvr5.size () == 8);
- assert (fpvr5[0] == req_alts ({"linux", "windows", "macosx"}));
- assert (!fpvr5[0].conditional);
+ assert (fpvr5[0][0][0] == "linux");
+ assert (fpvr5[0][1][0] == "windows");
+ assert (fpvr5[0][2][0] == "macosx");
assert (fpvr5[0].comment == "Symbian support is coming.");
- assert (fpvr5[1] == req_alts ({"c++11"}));
- assert (!fpvr5[1].conditional);
+ assert (fpvr5[1][0][0] == "c++11");
assert (fpvr5[1].comment.empty ());
- assert (fpvr5[2].empty ());
- assert (fpvr5[2].conditional);
+ assert (fpvr5[2][0][0] == "");
+ assert (fpvr5[2][0].enable && *fpvr5[2][0].enable == "");
assert (fpvr5[2].comment ==
"libc++ standard library if using Clang on Mac OS X.");
- assert (fpvr5[3] == req_alts ({"vc++ >= 12.0"}));
- assert (fpvr5[3].conditional);
- assert (fpvr5[3].comment == "Only if using VC++ on Windows.");
+ assert (fpvr5[3][0][0] == "");
+ assert (!fpvr5[3][0].enable);
+ assert (fpvr5[3].comment == "X11 libs.");
+
+ assert (fpvr5[4][0][0] == "");
+ assert (fpvr5[4][0].enable && *fpvr5[4][0].enable == "$windows");
+ assert (fpvr5[4].comment == "Only 64-bit.");
+
+ assert (fpvr5[5][0][0] == "x86_64");
+ assert (fpvr5[5][0].enable && *fpvr5[5][0].enable == "");
+ assert (fpvr5[5].comment == "Only if on Windows.");
+
+ assert (fpvr5[6][0][0] == "vc++ >= 12.0");
+ assert (fpvr5[6][0].enable && *fpvr5[6][0].enable == "windows");
+ assert (fpvr5[6].buildtime);
+ assert (fpvr5[6].comment == "Only if using VC++ on Windows.");
+
+ assert (fpvr5[7][0][0] == "host");
assert (check_location (fpv5));
assert (fpv5->sha256sum && *fpv5->sha256sum ==
- "c02b6033107387e05f48aa62ee6498152c967deb0e91a62f1e618fe9fd1bc644");
+ "ffce9d3e3ca9899d3fd6da1f6b93c07cce2c3f6b7004948b59757dae420f801b");
assert (fpv5->buildable);
@@ -842,7 +878,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (epv->project == "mathLab");
assert (epv->summary == "The exponent");
assert (epv->keywords == labels ({"mathlab", "c++", "exponent"}));
- assert (epv->description && *epv->description ==
+ assert (epv->description && epv->description->text ==
"The exponent math function.");
assert (epv->url && epv->url->string () == "http://exp.example.com");
assert (!epv->package_url);
@@ -853,7 +889,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (epv->internal_repository.load () == mr);
assert (epv->other_repositories.empty ());
assert (epv->priority == priority (priority::low));
- assert (epv->changes.empty ());
+ assert (!epv->changes);
assert (epv->license_alternatives.size () == 1);
assert (epv->license_alternatives[0].size () == 1);
@@ -861,10 +897,10 @@ test_pkg_repos (const cstrings& loader_args,
assert (epv->dependencies.size () == 2);
assert (epv->dependencies[0].size () == 1);
- assert (epv->dependencies[0][0] == dep ("libmisc", nullopt));
+ assert (epv->dependencies[0][0][0] == dep ("libmisc", nullopt));
assert (epv->dependencies[1].size () == 1);
- assert (epv->dependencies[1][0] ==
+ assert (epv->dependencies[1][0][0] ==
dep ("libpq",
version_constraint (
dep_ver ("9.0.0"), false, nullopt, true)));
@@ -885,7 +921,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (check_location (epv));
assert (epv->sha256sum && *epv->sha256sum ==
- "317c8c6f45d9dfdfdef3a823411920cecd51729c7c4f58f9a0b0bbd681c07bd6");
+ "d90cfe583890cd0c05cdfc204e69dd3b986c2da49851f7a87fa0ca870788ff79");
// Verify libpq package version.
//
@@ -976,8 +1012,8 @@ test_pkg_repos (const cstrings& loader_args,
assert (tr->location.canonical_name () == "pkg:dev.cppget.org/testing");
assert (tr->location.string () ==
"http://dev.cppget.org/1/testing");
- assert (tr->display_name.empty ());
- assert (tr->priority == 0);
+ assert (tr->display_name == "testing");
+ assert (tr->priority == 3);
assert (tr->interface_url &&
*tr->interface_url == "http://test.cppget.org/hello/");
assert (!tr->email);
@@ -995,7 +1031,7 @@ test_pkg_repos (const cstrings& loader_args,
assert (tr->repositories_timestamp ==
file_mtime (tr->cache_location.path () / repositories));
- assert (!tr->internal);
+ assert (tr->internal);
assert (tr->prerequisites.empty ());
assert (tr->complements.size () == 1);
assert (tr->complements[0].load () == gr);
@@ -1008,9 +1044,8 @@ test_pkg_repos (const cstrings& loader_args,
db.load<package> (
package_id (tenant, package_name ("libmisc"), version ("2.4.0"))));
- assert (check_external (*mpv0));
- assert (mpv0->other_repositories.size () == 1);
- assert (mpv0->other_repositories[0].load () == tr);
+ assert (mpv0->internal_repository.load () == tr);
+ assert (mpv0->other_repositories.empty ());
assert (check_location (mpv0));
assert (!mpv0->buildable);
@@ -1020,9 +1055,8 @@ test_pkg_repos (const cstrings& loader_args,
db.load<package> (
package_id (tenant, package_name ("libmisc"), version ("2.3.0+1"))));
- assert (check_external (*mpv1));
- assert (mpv1->other_repositories.size () == 1);
- assert (mpv1->other_repositories[0].load () == tr);
+ assert (mpv1->internal_repository.load () == tr);
+ assert (mpv1->other_repositories.empty ());
assert (check_location (mpv1));
assert (!mpv1->buildable);
diff --git a/tests/load/loadtab b/tests/load/loadtab
index 96e1f00..e919a32 100644
--- a/tests/load/loadtab
+++ b/tests/load/loadtab
@@ -1,4 +1,5 @@
-http://dev.cppget.org/1/stable stable cache:1/stable buildable:no
+http://dev.cppget.org/1/stable stable cache:1/stable buildable:no
http://dev.cppget.org/1/math math cache:1/math
-http://dev.cppget.org/1/signed signed cache:pkg/1/dev.cppget.org/signed fingerprint:C3:EC:12:53:AD:64:41:0E:35:3A:9A:A6:EE:57:BF:E6:05:40:42:2B:FF:AF:2C:B0:99:AD:E9:4A:9C:48:40:22
-http://dev.cppget.org/1/unsigned unsigned cache:pkg/1/dev.cppget.org/unsigned fingerprint:
+http://dev.cppget.org/1/testing testing cache:1/testing buildable:no
+http://dev.cppget.org/1/signed signed cache:pkg/1/dev.cppget.org/signed fingerprint:40:DD:B7:AD:88:87:C1:7A:11:94:45:22:2B:A2:E7:B3:F6:DE:92:6C:A0:DB:4B:EB:34:94:85:7A:C1:24:9A:E8
+http://dev.cppget.org/1/unsigned unsigned cache:pkg/1/dev.cppget.org/unsigned fingerprint:
diff --git a/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest b/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest
index 584c490..67d157f 100644
--- a/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest
+++ b/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest
@@ -1,2 +1,2 @@
: 1
-sha256sum: ab258d8d475c9dde36591df5f9c73bced79919ddec33408ef871025cbeab01d5
+sha256sum: 22e2ee564571d9fc9ac2748764ab45a3d64e717226dc41936a2197ef961751ac
diff --git a/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest b/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest
index ecd89f7..fd7a3a0 100644
--- a/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest
+++ b/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest
@@ -1,35 +1,36 @@
: 1
# Local repository manifest (this repository).
#
-certificate: \
+certificate:\
-----BEGIN CERTIFICATE-----
-MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV
-BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn
-MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k
-ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G
-CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e
-1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O
-bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u
-i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8
-lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i
-C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426
-crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV
-BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+
-7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF
-1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8
-OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ
-5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww
-CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq
-hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT
-mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq
-pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3
-Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz
-EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ
-SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr
-GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj
-V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3
-w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd
-VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X
-EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw=
+MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL
+BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2
+LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw
+FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0
+Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H
+eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE
+T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe
+BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75
+qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD
+QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip
+Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e
+inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ
+SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi
+j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko
+ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm
+lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW
+BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn
+ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX
+1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ
+8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR
+/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa
+uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6
+N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k
+jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC
+qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl
+yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel
+alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC
+fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J
+BGQjsz7h0bI3fw==
-----END CERTIFICATE-----
\
diff --git a/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest b/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest
index 46cea28..1d0d8e0 100644
--- a/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest
+++ b/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest
@@ -1,13 +1,13 @@
: 1
-sha256sum: 99ad81bc39c66e4eeeae438c46b22e2ab7bba3b3406c01df8bb301abbe8149d8
-signature: \
-tzjAIL6BA9D4L1eflZeM04a0sJjCGi8exyS0EAGlRZXRcXROX83Q+xIMbX5uPpAhMxbOHRfcywRi
-xM9UYjxVsy9sIrd9hwc4UBzm/BoFvvpQdjalSE4CN11MAbaFhiwWK0YkprTV8kTo2jYWfjTtT32Y
-o9sRhP6+WE4H32905ln3hudBEdn5fzWzIDmTs/ip2Gac7KMgMFAdqffjVWyCvR50IhpZiHUnnxVB
-NQ21NuckCS6ST94Sl/SXtFjMGJNjZGrKHYMRGfAysF++wl0FtWwxcbyNVMl1ED8ymu4cS8ydEvhO
-TsB2ENhQv5tAf5GSOeEE3GW3F6Xae8/ohE0K+mWasLWlgszAuLp8376H0L2x5lkgItXbYuzYfFKz
-6A5lUTfk8XV2ss+5DBop0AIm26m7rVO66s7w5Gpt3K8F7WLcQCQr4Ja/+AxE6YKtHbyqQ+PseNp1
-FSEdCZlZxdGnvA5/NWzpLZbsx0bCgKJ8RnMHrdOKsou/fbFHS9gv9AoFNVB1/l60aT1E28H0YsuN
-nx+rArDQZvxZKRL/O/p0YfmKkiQO3ikutBn5COhnbjMputb/TMed01lu9cbXGy1dskWxRmsDCVTI
-al15nByn6b3MI3Bd8cbvXwojaRMlombhDb+ybccMtAMmDqmX+7IFx0mkh1XLDKCgPtvpRN2xqDc=
+sha256sum: ef6a4f9d8f82794c484acea48ab6bb25ccaba270aa77a87848c5e06de9e6bca5
+signature:\
+sshgL1u/38ny3mVchVoML7TRTpPTTJyUqHcK5/iAJifCHCe1D7zqFzHtEO0To/9PcdN6vyF+0FEs
+IRkzVw8LnGkKWAeOwTftlO54zPC9swSh4BvuwMu+FEFa+3IcB/eecvRu0mPe+W2GZMoBBiCFJw2F
+4tOn6a4qDDqq5SbRdJ63IJnT8sRsBLQLFfbIsuN35KFvdpgBG7QBkP+dy2G5bbEr4TJqBQgQqmC/
+FCmGGEAwJ51ZCivGjFJcJ9QK89CI0s+z755TCHdcQBOBvjXDjFoRqw1MHDF77ZNeyKv1QL5ivLsO
+HqtV6YEYNiqBdpO7n14jwgWTBA8vZm5tCQ1G+vnie0q56Rde3nqKFpxHI3/uv59fhvXk9isikRRs
+cI4JIMNWld7Z1si4jrTA2ix/7PZwWTPLZTkJ6c+RRlHuBkGUvrC5n2mA0WXWpewWH0BZqgzSv4AW
+xFxLurFiIliYPMf4O7fvTtbARIsPzTFlZ0VU8QBOg7/pbxn20wHDwlQOBG8Y2y69KyMGdPysdCm6
+8QLjmIvYGy3osfK7bh9ZpjgsHe2Tx+rjwq87Zpo9H8D3DjVilC5WblaB+2YAydc/q4oZsKb/X2QQ
+KXer6XF/Y8l7xLpiZLrUg2oA5j0uIFog6kX7sKeFv2vDRgAjBLf4lXLVKd5VQoNQTOxObAoKmys=
\
diff --git a/tests/submit/buildfile b/tests/submit/buildfile
index aaffede..a90115a 100644
--- a/tests/submit/buildfile
+++ b/tests/submit/buildfile
@@ -1,5 +1,4 @@
# file : tests/submit/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
dir = ../../brep/handler/submit/
@@ -8,9 +7,12 @@ include $dir
commons = data
-./: testscript{* -{$commons}} common_testscript{$commons} {*/ -test/}{**} \
- $dir/exe{brep-submit-dir} $dir/exe{brep-submit-git} file{hello.tar.gz} \
- doc{README}
+./: testscript{* -{$commons}} common_testscript{$commons} {*/ -test/}{**} \
+ $dir/exe{brep-submit-dir} \
+ $dir/exe{brep-submit-git} \
+ $dir/exe{brep-submit-pub} \
+ file{hello.tar.gz} doc{README}
testscript{submit-dir}@./: test = $out_base/$dir/brep-submit-dir
testscript{submit-git}@./: test = $out_base/$dir/brep-submit-git
+testscript{submit-pub}@./: test = $out_base/$dir/brep-submit-pub
diff --git a/tests/submit/data.testscript b/tests/submit/data.testscript
index 70507d5..b0fe8f0 100644
--- a/tests/submit/data.testscript
+++ b/tests/submit/data.testscript
@@ -1,15 +1,15 @@
# file : tests/submit/data.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
# Pre-created submission data directory that will be copied by subsequent
# tests and scope setup commands. The common approach will be that group
-# scopes copy and modify the parent scope submission directory as required by
-# the nested tests and scopes. Tests will also clone the parent scope
-# submission data directory to optionally modify it, use and cleanup at the
-# end. Note that configuration can not be shared between multiple submission
-# handler processes. Also we need to make sure that submission data
-# directories are not cloned while being used by submission handler scripts.
+# scopes copy and modify the parent scope submission data directory as
+# required by the nested tests and scopes. Tests will also clone the parent
+# scope submission data directory to optionally modify it, use and cleanup at
+# the end. Note that submission data directory can not be shared between
+# multiple submission handler processes. Also we need to make sure that
+# submission data directories are not cloned while being used by submission
+# handler scripts.
#
data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \
'(.*)/.*', \
@@ -26,10 +26,10 @@ root_data_dir = $~/$data_dir
# The most commonly used submission data directory cloning command that copies
# it from the parent scope working directory.
#
-clone_data = cp --no-cleanup -r ../$data_dir ./
-clone_data_clean = cp --no-cleanup -r ../$data_dir ./ &$data_dir/***
+clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./
+clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/***
# Clones the original submission data directory.
#
-clone_root_data = cp --no-cleanup -r $root_data_dir ./
-clone_root_data_clean = cp --no-cleanup -r $root_data_dir ./ &$data_dir/***
+clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./
+clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/***
diff --git a/tests/submit/submit-dir.testscript b/tests/submit/submit-dir.testscript
index 4a0c619..285710f 100644
--- a/tests/submit/submit-dir.testscript
+++ b/tests/submit/submit-dir.testscript
@@ -1,5 +1,4 @@
# file : tests/submit/submit-dir.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
.include data.testscript
@@ -78,7 +77,16 @@
$* >>"EOO"
: 1
status: 400
- message: archive is not a valid package \(run bpkg pkg-verify for details\)
+ message:\\
+ package archive is not valid
+
+ gzip: libhello-0.1.0.tar.gz: not in gzip format
+ tar: This does not look like a tar archive
+ tar: libhello-0.1.0/manifest: Not found in archive
+ tar: Exiting with failure status due to previous errors
+ info: libhello-0.1.0.tar.gz does not appear to be a bpkg package
+ info: run bpkg pkg-verify for details
+ \\
reference: $checksum
EOO
}
diff --git a/tests/submit/submit-git.testscript b/tests/submit/submit-git.testscript
index 122c9ae..5197afc 100644
--- a/tests/submit/submit-git.testscript
+++ b/tests/submit/submit-git.testscript
@@ -1,5 +1,4 @@
# file : tests/submit/submit-git.testscript
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
.include data.testscript
@@ -10,19 +9,23 @@
#
# test.redirects += 2>!
-g = git 2>! >&2
+g = [cmdline] git 2>! >&2
# Create and clone the reference repository.
#
root_ref = $~/ref.git
root_ref_dir = $~/ref
-clone_root_ref = cp --no-cleanup -r $root_ref ./ &ref.git/***
+clone_root_ref = [cmdline] cp --no-cleanup -r $root_ref ./ &ref.git/***
+mkdir --no-cleanup $root_ref
+$g -C $root_ref init --bare &ref.git/***
-+$g clone $root_ref $root_ref_dir &ref/***
+# Convert specific warnings to infos as we expect them to appear. This, in
+# particular, prevents bbot workers to set task result status to warning.
+#
++git clone $root_ref $root_ref_dir &ref/*** 2>&1 | \
+ sed -e 's/warning: (.*cloned an empty repository.*)/info: \1/' >&2 2>!
+cat <<EOI >=$root_ref_dir/submit.config.bash
sections[alpha]=1/alpha
@@ -32,6 +35,8 @@ clone_root_ref = cp --no-cleanup -r $root_ref ./ &ref.git/***
owners=owners
EOI
++$g -C $root_ref_dir config user.name 'Test Script'
++$g -C $root_ref_dir config user.email 'testscript@example.com'
+$g -C $root_ref_dir add '*'
+$g -C $root_ref_dir commit -m 'Add submit.config.bash'
+$g -C $root_ref_dir push
@@ -43,7 +48,7 @@ root_tgt_url = "file:///$~/tgt.git"
+cp -r $root_ref $root_tgt
-clone_root_tgt = cp --no-cleanup -r $root_tgt ./ &tgt.git/***
+clone_root_tgt = [cmdline] cp --no-cleanup -r $root_tgt ./ &tgt.git/***
# Extract the package repository.
#
@@ -95,10 +100,10 @@ pkg_ctl="$prj_ctl/hello.git"
: success
:
{
- : ref-unknown-tgt-aquire-prj-pkg
+ : ref-unknown-tgt-acquire-prj-pkg
:
: Test that on the first package submission the project and package names
- : ownership is successfully aquired. Authentication is enabled on both the
+ : ownership is successfully acquired. Authentication is enabled on both the
: reference and target repos.
:
: Note that here we also test that --commiter-* options are picked up
@@ -174,7 +179,7 @@ pkg_ctl="$prj_ctl/hello.git"
: ref-disabled-tgt-aquire-prj-pkg
:
: Test that on the first package submit the project and package names
- : ownership is successfully aquired. Authentication is disabled for the
+ : ownership is successfully acquired. Authentication is disabled for the
: reference repo.
:
{
@@ -191,6 +196,8 @@ pkg_ctl="$prj_ctl/hello.git"
# owners=owners
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref commit -am 'Disable ownership';
$g -C ref push;
@@ -204,10 +211,10 @@ pkg_ctl="$prj_ctl/hello.git"
EOO
}
- : ref-absent-tgt-aquire-prj-pkg
+ : ref-absent-tgt-acquire-prj-pkg
:
: Test that on the first package submit the project and package names
- : ownership is successfully aquired. Reference repo is absent.
+ : ownership is successfully acquired. Reference repo is absent.
:
: Note that here we also pass the --result-url option.
:
@@ -252,6 +259,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $pkg_ctl
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt add owners;
$g -C tgt commit -m 'Add ownership info';
$g -C tgt push;
@@ -289,6 +298,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add ownership info';
$g -C ref push;
@@ -306,6 +317,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $pkg_ctl
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt add owners;
$g -C tgt commit -m 'Add ownership info';
$g -C tgt push;
@@ -358,6 +371,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $pkg_ctl
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add ownership info';
$g -C ref push;
@@ -393,6 +408,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add ownership info';
$g -C ref push;
@@ -426,6 +443,8 @@ pkg_ctl="$prj_ctl/hello.git"
# owners=owners
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt commit -am 'Disable ownership';
$g -C tgt push;
@@ -437,6 +456,78 @@ pkg_ctl="$prj_ctl/hello.git"
EOO
}
+ : ref-absent-tgt-pkg-rev
+ :
+ : Test that the previous package revision is removed on the new revision submission.
+ :
+ {
+ $clone_root_data;
+
+ $clone_root_tgt;
+ $g clone tgt.git &tgt/***;
+
+ cat <<EOI >=tgt/submit.config.bash;
+ sections['*']=1/alpha
+ EOI
+
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
+ $g -C tgt commit -am 'Add config and archive';
+ $g -C tgt push;
+
+ # Initial submission.
+ #
+ $* "file:///$~/tgt.git" $data_dir >>"EOO";
+ : 1
+ status: 200
+ message: package submission is queued: libhello/0.1.0
+ reference: $checksum
+ EOO
+
+ $g -C tgt pull;
+
+ test -f tgt/1/alpha/hello/libhello-0.1.0.tar.gz;
+
+ # Revision submission.
+ #
+ # Here we test that the handler removes the previous revision.
+ #
+ $clone_root_data_clean;
+
+ tar -xf $~/$data_dir/libhello-0.1.0.tar.gz;
+ sed -i -e 's/(version: 0.1.0)/\1+1/' libhello-0.1.0/manifest;
+ mv libhello-0.1.0 libhello-0.1.0+1;
+ tar cfz $~/$data_dir/libhello-0.1.0+1.tar.gz libhello-0.1.0+1;
+ rm -r libhello-0.1.0+1;
+ rm $~/$data_dir/libhello-0.1.0.tar.gz;
+ sed -i -e 's/(archive: libhello-0.1.0)(.tar.gz)/\1+1\2/' $data_dir/request.manifest;
+
+ $* "file:///$~/tgt.git" $data_dir >>"EOO" &tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz;
+ : 1
+ status: 200
+ message: package submission is queued: libhello/0.1.0+1
+ reference: $checksum
+ EOO
+
+ $g -C tgt pull;
+
+ test -f tgt/1/alpha/hello/libhello-0.1.0.tar.gz == 1;
+ test -f tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz;
+
+ # While at it, test the older revision submission.
+ #
+ $clone_root_data_clean;
+
+ $* "file:///$~/tgt.git" $data_dir >>"EOO";
+ : 1
+ status: 422
+ message: newer revision libhello/0.1.0+1 is present
+ reference: $checksum
+ EOO
+
+ test -f tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz
+ }
+
: section-fallback
:
{
@@ -455,6 +546,8 @@ pkg_ctl="$prj_ctl/hello.git"
owners=owners
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt commit -am 'Add section name fallback';
$g -C tgt push;
@@ -572,6 +665,8 @@ pkg_ctl="$prj_ctl/hello.git"
mkdir -p ref/1/alpha/hello;
cp $data_dir/libhello-0.1.0.tar.gz ref/1/alpha/hello/;
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add 1/;
$g -C ref commit -m 'Add libhello-0.1.0.tar.gz';
$g -C ref push;
@@ -613,6 +708,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/foo
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add ownership info';
$g -C ref push;
@@ -645,6 +742,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: https://example.com/foo
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners/hello/project-owner.manifest;
$g -C ref commit -m 'Add project ownership info';
$g -C ref push;
@@ -686,6 +785,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/foo
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add ownership info';
$g -C ref push;
@@ -712,6 +813,8 @@ pkg_ctl="$prj_ctl/hello.git"
mkdir -p tgt/1/alpha/hello;
cp $data_dir/libhello-0.1.0.tar.gz tgt/1/alpha/hello/;
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt add 1/;
$g -C tgt commit -m 'Add libhello-0.1.0.tar.gz';
$g -C tgt push;
@@ -752,6 +855,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/foo
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt add owners;
$g -C tgt commit -m 'Add ownership info';
$g -C tgt push;
@@ -784,6 +889,8 @@ pkg_ctl="$prj_ctl/hello.git"
# owners=owners
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt commit -am 'Disable ownership';
$g -C tgt push;
@@ -817,6 +924,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/
EOI
+ $g -C ref config user.name 'Test Script';
+ $g -C ref config user.email 'testscript@example.com';
$g -C ref add owners;
$g -C ref commit -m 'Add project ownership info';
$g -C ref push;
@@ -832,6 +941,8 @@ pkg_ctl="$prj_ctl/hello.git"
# owners=owners
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt commit -am 'Disable ownership';
$g -C tgt push;
@@ -872,6 +983,8 @@ pkg_ctl="$prj_ctl/hello.git"
control: $prj_ctl/foo
EOI
+ $g -C tgt config user.name 'Test Script';
+ $g -C tgt config user.email 'testscript@example.com';
$g -C tgt add owners;
$g -C tgt commit -m 'Add ownership info';
$g -C tgt push;
diff --git a/tests/submit/submit-pub.testscript b/tests/submit/submit-pub.testscript
new file mode 100644
index 0000000..8c042a7
--- /dev/null
+++ b/tests/submit/submit-pub.testscript
@@ -0,0 +1,213 @@
+# file : tests/submit/submit-pub.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include data.testscript
+
+user=$getenv("USER")
+
+# Disable tests if the password-less sudo is not enabled for the user.
+#
++if! sudo -u "$user" --non-interactive echo 'test' >'test'
+ exit
+end
+
+root_rep=$~/pkg-1
+
++mkdir -p $root_rep/1
++echo ": 1" >=$root_rep/1/repositories.manifest
++bpkg rep-create $root_rep/1 2>! &$root_rep/1/packages.manifest
+
+clone_root_rep = [cmdline] cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock
+
+: args
+{
+ : none
+ :
+ $* 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref \] \[brep-submit-pub\]: usage: .+brep-submit-pub \[<options>\] \[<loader-path> <loader-options>\] <repo> <dir>%
+ EOE
+
+ : few
+ :
+ $* $~/repo 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref repo\] \[brep-submit-pub\]: usage: .+brep-submit-pub \[<options>\] \[<loader-path> <loader-options>\] <repo> <dir>%
+ EOE
+
+ : repo-not-exist
+ :
+ $* $~/repo $~/dir 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref dir\] \[brep-submit-pub\]: '.+repo' does not exist or is not a symlink%
+ EOE
+
+ : dir-not-exist
+ :
+ {
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+ $* $~/pkg $~/dir 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref dir\] \[brep-submit-pub\]: '.+dir' does not exist or is not a directory%
+ EOE
+ }
+}
+
+: success
+:
+{
+ test.options += --user "$user"
+
+ : simulate
+ :
+ {
+ $clone_root_data;
+ echo 'simulate: success' >+$data_dir/request.manifest;
+
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+
+ $* $~/pkg $~/$data_dir >>"EOO";
+ : 1
+ status: 200
+ message: package is published: libhello/0.1.0
+ reference: $checksum
+ EOO
+
+ test -d $data_dir != 0
+ }
+
+ : for-real
+ :
+ : Here we also create the package revision which is expected to be removed
+ : by the handler.
+ :
+ {
+ $clone_root_data;
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+
+ # Initial submission.
+ #
+ $* $~/pkg $~/$data_dir &!pkg-1/*** &pkg-*/*** >>"EOO";
+ : 1
+ status: 200
+ message: package is published: libhello/0.1.0
+ reference: $checksum
+ EOO
+
+ test -f pkg/1/hello/libhello-0.1.0.tar.gz;
+
+ # While at it, test the duplicate submission.
+ #
+ $clone_root_data_clean;
+
+ $* $~/pkg $~/$data_dir >>"EOO";
+ : 1
+ status: 422
+ message: duplicate submission
+ reference: $checksum
+ EOO
+
+ test -f pkg/1/hello/libhello-0.1.0.tar.gz;
+
+ # Revision submission.
+ #
+ # Here we test that the handler removes the previous revision.
+ #
+ tar -xf $~/$data_dir/libhello-0.1.0.tar.gz;
+ sed -i -e 's/(version: 0.1.0)/\1+1/' libhello-0.1.0/manifest;
+ mv libhello-0.1.0 libhello-0.1.0+1;
+ tar cfz $~/$data_dir/libhello-0.1.0+1.tar.gz libhello-0.1.0+1;
+ rm -r libhello-0.1.0+1;
+ rm $~/$data_dir/libhello-0.1.0.tar.gz;
+ sed -i -e 's/(archive: libhello-0.1.0)(.tar.gz)/\1+1\2/' $data_dir/request.manifest;
+
+ $* $~/pkg $~/$data_dir >>"EOO";
+ : 1
+ status: 200
+ message: package is published: libhello/0.1.0+1
+ reference: $checksum
+ EOO
+
+ test -f pkg/1/hello/libhello-0.1.0.tar.gz == 1;
+ test -f pkg/1/hello/libhello-0.1.0+1.tar.gz;
+
+ # While at it, test the older revision submission.
+ #
+ $clone_root_data_clean;
+
+ $* $~/pkg $~/$data_dir >>"EOO";
+ : 1
+ status: 422
+ message: newer revision libhello/0.1.0+1 is present
+ reference: $checksum
+ EOO
+
+ test -f pkg/1/hello/libhello-0.1.0+1.tar.gz
+ }
+
+ : result-url
+ :
+ {
+ $clone_root_data;
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+
+ test.options += --result-url 'https://example.com/';
+
+ $* $~/pkg $~/$data_dir &!pkg-1/*** &pkg-*/*** >>"EOO"
+ : 1
+ status: 200
+ message: package is published: https://example.com/libhello/0.1.0
+ reference: $checksum
+ EOO
+ }
+}
+
+: failure
+:
+{
+ test.options += --user "$user"
+
+ : invalid-package
+ :
+ : Here we remove repositories.manifest to make sure that the bpkg-rep-create
+ : failure ends up with a proper response.
+ :
+ {
+ $clone_root_data_clean;
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+
+ rm pkg/1/repositories.manifest;
+
+ $* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%"
+ : 1
+ status: 400
+ message:\\
+ submitted archive is not a valid package
+ %.+
+ \\
+ reference: $checksum
+ EOO
+ }
+
+ : not-loadable
+ :
+ : Here we specify a non-existing brep loader program to make sure that the
+ : brep database load failure ends up with a proper response.
+ :
+ {
+ $clone_root_data_clean;
+ $clone_root_rep;
+ ln -s pkg-1 pkg;
+
+ $* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%"
+ : 1
+ status: 400
+ message:\\
+ unable to add package to repository
+ %.+
+ \\
+ reference: $checksum
+ EOO
+ }
+}
diff --git a/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar
new file mode 100644
index 0000000..d3b5b17
--- /dev/null
+++ b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar
Binary files differ
diff --git a/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest
new file mode 100644
index 0000000..c59303b
--- /dev/null
+++ b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest
@@ -0,0 +1,22 @@
+: 1
+id: 545f1f78-63ea-4acf-bcb8-37b2513a78c8
+session: 4d4c8b36-56c5-42e0-91d1-58bfd1228002/libhello/1.1.0+10/x86_64-linux-\
+gnu/linux_fedora_37-gcc_12.2-bindist/default/queue/0.3.0/1683122318585120886
+instance: archive
+archive: archive.tar
+sha256sum: 4fa79e4e11a03db321514800806a2b0a3a8eef9375dc22963f4e5a16764c0d5e
+timestamp: 2023-05-08T09:18:20Z
+name: libhello
+version: 1.1.0+10
+project: hello
+target-config: linux_fedora_37-gcc_12.2-bindist
+package-config: default
+target: x86_64-linux-gnu
+tenant: 4d4c8b36-56c5-42e0-91d1-58bfd1228002
+toolchain-name: queue
+toolchain-version: 0.3.0
+repository-name: git:build2.org/var/scm/hello/libhello#master@7f62790591b66bd\
+a248140013bdbd12bf078c2a2
+machine-name: linux_fedora_37-bindist-gcc_12.2
+machine-summary: Fedora Linux 37 with system-default GCC 12.2.1 and bpkg-pkg-\
+bindist prerequisites
diff --git a/tests/upload/buildfile b/tests/upload/buildfile
new file mode 100644
index 0000000..32d7720
--- /dev/null
+++ b/tests/upload/buildfile
@@ -0,0 +1,13 @@
+# file : tests/upload/buildfile
+# license : MIT; see accompanying LICENSE file
+
+dir = ../../brep/handler/upload/
+
+include $dir
+
+commons = data
+
+./: testscript{* -{$commons}} common_testscript{$commons} {*/ -test/}{**} \
+ $dir/exe{brep-upload-bindist}
+
+testscript{upload-bindist}@./: test = $out_base/$dir/brep-upload-bindist
diff --git a/tests/upload/data.testscript b/tests/upload/data.testscript
new file mode 100644
index 0000000..3d3eede
--- /dev/null
+++ b/tests/upload/data.testscript
@@ -0,0 +1,34 @@
+# file : tests/upload/data.testscript
+# license : MIT; see accompanying LICENSE file
+
+# Pre-created upload data directory that will be copied by subsequent tests
+# and scope setup commands. The common approach will be that group scopes copy
+# and modify the parent scope upload data directory as required by the nested
+# tests and scopes. Tests will also clone the parent scope upload data
+# directory to optionally modify it, use and cleanup at the end. Note that
+# upload data directory can not be shared between multiple upload handler
+# processes. Also we need to make sure that upload data directories are not
+# cloned while being used by upload handler scripts.
+#
+data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \
+ '(.*)/.*', \
+ '\1')
+
+request_id = "$data_dir"
+
+# Copy the original upload data directory to the root scope.
+#
++cp -r $src_base/$data_dir ./
+
+root_data_dir = $~/$data_dir
+
+# The most commonly used upload data directory cloning command that copies it
+# from the parent scope working directory.
+#
+clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./
+clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/***
+
+# Clones the original upload data directory.
+#
+clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./
+clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/***
diff --git a/tests/upload/upload-bindist.testscript b/tests/upload/upload-bindist.testscript
new file mode 100644
index 0000000..d43c567
--- /dev/null
+++ b/tests/upload/upload-bindist.testscript
@@ -0,0 +1,126 @@
+# file : tests/upload/upload-bindist.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include data.testscript
+
+: args
+{
+ : no-dir
+ :
+ $* 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref \] \[brep-upload-bindist\]: usage: .+brep-upload-bindist \[<options>\] <root> <dir>%
+ EOE
+
+ : no-root
+ :
+ $* $~/dir 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: usage: .+brep-upload-bindist \[<options>\] <root> <dir>%
+ EOE
+
+ : root-not-exist
+ :
+ : While at it, also test that the trailing slash is stripped from the
+ : directory paths.
+ :
+ $* $~/root/ $~/dir/ 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: '.+root' does not exist or is not a directory%
+ EOE
+
+ : data-not-exist
+ :
+ mkdir root;
+ $* $~/root $~/dir 2>>~%EOE% != 0
+ %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: '.+dir' does not exist or is not a directory%
+ EOE
+}
+
+: success
+:
+{
+ mkdir --no-cleanup bindist-root/ &bindist-root/***;
+
+ # Test the first upload.
+ #
+ $clone_data;
+
+ $* $~/bindist-root/ $~/$data_dir >>"EOO";
+ : 1
+ status: 200
+ message: binary distribution packages are published
+ reference: $request_id
+ EOO
+
+ timestamp = '2023-05-08T09:18:20Z';
+ tenant = '4d4c8b36-56c5-42e0-91d1-58bfd1228002';
+ dir = [dir_path] bindist-root/$tenant/archive/fedora35/hello/libhello/1.1.0+10/;
+
+ test -f $dir/default/libhello-1.0.0+10.tar.xz;
+ test -f $dir/default-$timestamp/libhello-1.0.0+10.tar.xz;
+
+ # Repeat the upload using the same timestamp to make sure that we properly
+ # handle this situation (by adding the retry number as a suffix to the
+ # package configuration directory name).
+ #
+ $clone_data;
+
+ $* $~/bindist-root/ $~/$data_dir >>"EOO" &bindist-root/***;
+ : 1
+ status: 200
+ message: binary distribution packages are published
+ reference: $request_id
+ EOO
+
+ test -f $dir/default/libhello-1.0.0+10.tar.xz;
+ test -f $dir/default-$timestamp-0/libhello-1.0.0+10.tar.xz;
+ test -d $dir/default-$timestamp/ != 0;
+
+ # Test the second upload without --keep-previous option.
+ #
+ data_dir2 = 22222222-2222-2222-2222-222222222222;
+ request_id2 = $data_dir2;
+ timestamp2 = '2023-05-09T09:18:20Z';
+
+ cp --no-cleanup -r ../$data_dir ./$data_dir2;
+
+ sed -i -e "s%^\(id:\) .+\$%\\1 $request_id2%" \
+ $data_dir2/request.manifest;
+
+ sed -i -e "s%^\(timestamp:\) .+\$%\\1 $timestamp2%" \
+ $data_dir2/request.manifest;
+
+ $* $~/bindist-root/ $~/$data_dir2 >>"EOO";
+ : 1
+ status: 200
+ message: binary distribution packages are published
+ reference: $request_id2
+ EOO
+
+ test -f $dir/default/libhello-1.0.0+10.tar.xz;
+ test -f $dir/default-$timestamp2/libhello-1.0.0+10.tar.xz;
+ test -d $dir/default-$timestamp-0/ != 0;
+
+ # Test the third upload with --keep-previous option.
+ #
+ data_dir3 = 33333333-3333-3333-3333-333333333333;
+ request_id3 = $data_dir3;
+ timestamp3 = '2023-05-10T09:18:20Z';
+
+ cp --no-cleanup -r ../$data_dir ./$data_dir3;
+
+ sed -i -e "s%^\(id:\) .+\$%\\1 $request_id3%" \
+ $data_dir3/request.manifest;
+
+ sed -i -e "s%^\(timestamp:\) .+\$%\\1 $timestamp3%" \
+ $data_dir3/request.manifest;
+
+ $* --keep-previous $~/bindist-root/ $~/$data_dir3 >>"EOO";
+ : 1
+ status: 200
+ message: binary distribution packages are published
+ reference: $request_id3
+ EOO
+
+ test -f $dir/default/libhello-1.0.0+10.tar.xz;
+ test -f $dir/default-$timestamp3/libhello-1.0.0+10.tar.xz;
+ test -f $dir/default-$timestamp2/libhello-1.0.0+10.tar.xz
+}
diff --git a/tests/web/xhtml/buildfile b/tests/web/xhtml/buildfile
index 983909f..ff683b9 100644
--- a/tests/web/xhtml/buildfile
+++ b/tests/web/xhtml/buildfile
@@ -1,8 +1,7 @@
# file : tests/web/xhtml/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
-include ../../../web/
+include ../../../web/xhtml/
-exe{driver}: {hxx cxx}{*} ../../../web/libus{web}
+exe{driver}: {hxx cxx}{*} ../../../web/xhtml/libue{xhtml}
exe{driver}: file{test.out}: test.stdout = true
diff --git a/tests/web/xhtml/driver.cxx b/tests/web/xhtml/driver.cxx
index 9b35ae8..3393eb3 100644
--- a/tests/web/xhtml/driver.cxx
+++ b/tests/web/xhtml/driver.cxx
@@ -1,5 +1,4 @@
// file : tests/web/xhtml/driver.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
#include <iostream>
@@ -7,7 +6,10 @@
#include <libstudxml/serializer.hxx>
-#include <web/xhtml.hxx>
+#include <web/xhtml/serialization.hxx>
+
+#undef NDEBUG
+#include <cassert>
using namespace std;
using namespace xml;
diff --git a/web/apache/log.hxx b/web/server/apache/log.hxx
index 147a6af..f7738ef 100644
--- a/web/apache/log.hxx
+++ b/web/server/apache/log.hxx
@@ -1,9 +1,8 @@
-// file : web/apache/log.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/log.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_APACHE_LOG_HXX
-#define WEB_APACHE_LOG_HXX
+#ifndef WEB_SERVER_APACHE_LOG_HXX
+#define WEB_SERVER_APACHE_LOG_HXX
#include <httpd.h> // request_rec, server_rec
#include <http_log.h>
@@ -12,7 +11,7 @@
#include <cstdint> // uint64_t
#include <algorithm> // min()
-#include <web/module.hxx>
+#include <web/server/module.hxx>
namespace web
{
@@ -78,4 +77,4 @@ namespace web
}
}
-#endif // WEB_APACHE_LOG_HXX
+#endif // WEB_SERVER_APACHE_LOG_HXX
diff --git a/web/apache/request.cxx b/web/server/apache/request.cxx
index bed7042..f6e9f15 100644
--- a/web/apache/request.cxx
+++ b/web/server/apache/request.cxx
@@ -1,8 +1,7 @@
-// file : web/apache/request.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/request.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <web/apache/request.hxx>
+#include <web/server/apache/request.hxx>
#include <apr.h> // APR_SIZE_MAX
#include <apr_errno.h> // apr_status_t, APR_SUCCESS, APR_E*, apr_strerror()
@@ -35,11 +34,11 @@
#include <streambuf>
#include <algorithm> // min()
-#include <libbutl/utility.mxx> // icasecmp()
-#include <libbutl/optional.mxx>
-#include <libbutl/timestamp.mxx>
+#include <libbutl/utility.hxx> // icasecmp()
+#include <libbutl/optional.hxx>
+#include <libbutl/timestamp.hxx>
-#include <web/mime-url-encoding.hxx>
+#include <web/server/mime-url-encoding.hxx>
using namespace std;
using namespace butl;
@@ -790,7 +789,7 @@ namespace web
if (is != nullptr)
{
if (r != nullptr)
- throw invalid_argument ("multiple uploads for '" + name + "'");
+ throw invalid_argument ("multiple uploads for '" + name + '\'');
r = is;
}
diff --git a/web/apache/request.hxx b/web/server/apache/request.hxx
index 01c7290..bc105ec 100644
--- a/web/apache/request.hxx
+++ b/web/server/apache/request.hxx
@@ -1,9 +1,8 @@
-// file : web/apache/request.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/request.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_APACHE_REQUEST_HXX
-#define WEB_APACHE_REQUEST_HXX
+#ifndef WEB_SERVER_APACHE_REQUEST_HXX
+#define WEB_SERVER_APACHE_REQUEST_HXX
#include <httpd.h> // request_rec, HTTP_*, OK, M_POST
@@ -15,8 +14,8 @@
#include <ostream>
#include <streambuf>
-#include <web/module.hxx>
-#include <web/apache/stream.hxx>
+#include <web/server/module.hxx>
+#include <web/server/apache/stream.hxx>
namespace web
{
@@ -229,6 +228,6 @@ namespace web
}
}
-#include <web/apache/request.ixx>
+#include <web/server/apache/request.ixx>
-#endif // WEB_APACHE_REQUEST_HXX
+#endif // WEB_SERVER_APACHE_REQUEST_HXX
diff --git a/web/apache/request.ixx b/web/server/apache/request.ixx
index 157a751..119fd2e 100644
--- a/web/apache/request.ixx
+++ b/web/server/apache/request.ixx
@@ -1,5 +1,4 @@
-// file : web/apache/request.ixx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/request.ixx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
#include <http_protocol.h> // ap_*()
diff --git a/web/apache/service.cxx b/web/server/apache/service.cxx
index 3306c61..6d02c1a 100644
--- a/web/apache/service.cxx
+++ b/web/server/apache/service.cxx
@@ -1,8 +1,7 @@
-// file : web/apache/service.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/service.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <web/apache/service.hxx>
+#include <web/server/apache/service.hxx>
#include <apr_pools.h> // apr_palloc()
@@ -16,11 +15,11 @@
#include <cstring> // strlen(), strcmp()
#include <exception>
-#include <libbutl/utility.mxx> // function_cast()
-#include <libbutl/optional.mxx>
+#include <libbutl/utility.hxx> // function_cast()
+#include <libbutl/optional.hxx>
-#include <web/module.hxx>
-#include <web/apache/log.hxx>
+#include <web/server/module.hxx>
+#include <web/server/apache/log.hxx>
using namespace std;
using namespace butl;
@@ -48,7 +47,7 @@ namespace web
for (const auto& o: od)
{
auto i (
- option_descriptions_.emplace (name_ + "-" + o.first, o.second));
+ option_descriptions_.emplace (name_ + '-' + o.first, o.second));
assert (i.second);
*d++ =
diff --git a/web/apache/service.hxx b/web/server/apache/service.hxx
index 42ef52f..ad54d2c 100644
--- a/web/apache/service.hxx
+++ b/web/server/apache/service.hxx
@@ -1,9 +1,8 @@
-// file : web/apache/service.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/service.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_APACHE_SERVICE_HXX
-#define WEB_APACHE_SERVICE_HXX
+#ifndef WEB_SERVER_APACHE_SERVICE_HXX
+#define WEB_SERVER_APACHE_SERVICE_HXX
#include <apr_pools.h> // apr_pool_t
#include <apr_hooks.h> // APR_HOOK_*
@@ -16,9 +15,9 @@
#include <string>
#include <cassert>
-#include <web/module.hxx>
-#include <web/apache/log.hxx>
-#include <web/apache/request.hxx>
+#include <web/server/module.hxx>
+#include <web/server/apache/log.hxx>
+#include <web/server/apache/request.hxx>
namespace web
{
@@ -329,6 +328,6 @@ namespace web
}
}
-#include <web/apache/service.txx>
+#include <web/server/apache/service.txx>
-#endif // WEB_APACHE_SERVICE_HXX
+#endif // WEB_SERVER_APACHE_SERVICE_HXX
diff --git a/web/apache/service.txx b/web/server/apache/service.txx
index 99a8110..9e1037b 100644
--- a/web/apache/service.txx
+++ b/web/server/apache/service.txx
@@ -1,5 +1,4 @@
-// file : web/apache/service.txx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/service.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
#include <httpd.h> // APEXIT_CHILDSICK
@@ -9,7 +8,7 @@
#include <utility> // move()
#include <exception>
-#include <libbutl/utility.mxx> // operator<<(ostream, exception)
+#include <libbutl/utility.hxx> // operator<<(ostream, exception)
namespace web
{
diff --git a/web/apache/stream.hxx b/web/server/apache/stream.hxx
index e103449..77145af 100644
--- a/web/apache/stream.hxx
+++ b/web/server/apache/stream.hxx
@@ -1,9 +1,8 @@
-// file : web/apache/stream.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/apache/stream.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_APACHE_STREAM_HXX
-#define WEB_APACHE_STREAM_HXX
+#ifndef WEB_SERVER_APACHE_STREAM_HXX
+#define WEB_SERVER_APACHE_STREAM_HXX
#include <httpd.h> // request_rec, HTTP_*
#include <http_protocol.h> // ap_*()
@@ -14,7 +13,7 @@
#include <streambuf>
#include <algorithm> // min(), max()
-#include <web/module.hxx> // invalid_request
+#include <web/server/module.hxx> // invalid_request
namespace web
{
@@ -146,4 +145,4 @@ namespace web
}
}
-#endif // WEB_APACHE_STREAM_HXX
+#endif // WEB_SERVER_APACHE_STREAM_HXX
diff --git a/web/buildfile b/web/server/buildfile
index a535574..26de70f 100644
--- a/web/buildfile
+++ b/web/server/buildfile
@@ -1,5 +1,4 @@
-# file : web/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+# file : web/server/buildfile
# license : MIT; see accompanying LICENSE file
# This is currently part of the brep apache module but lives in a separate
@@ -9,11 +8,8 @@
#
import libs = libapr1%lib{apr-1}
import libs += libapreq2%lib{apreq2}
-import libs += libstudxml%lib{studxml}
import libs += libbutl%lib{butl}
-libus{web}: {hxx ixx txx cxx}{** -version} {hxx}{version} $libs
-
-hxx{version}: in{version} $src_root/manifest
+libus{web-server}: {hxx ixx txx cxx}{**} $libs
{hxx ixx txx}{*}: install = false
diff --git a/web/mime-url-encoding.cxx b/web/server/mime-url-encoding.cxx
index c43510e..fd09cd2 100644
--- a/web/mime-url-encoding.cxx
+++ b/web/server/mime-url-encoding.cxx
@@ -1,13 +1,12 @@
-// file : web/mime-url-encoding.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/mime-url-encoding.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <web/mime-url-encoding.hxx>
+#include <web/server/mime-url-encoding.hxx>
#include <string>
#include <iterator> // back_inserter
-#include <libbutl/url.mxx>
+#include <libbutl/url.hxx>
using namespace std;
using namespace butl;
diff --git a/web/mime-url-encoding.hxx b/web/server/mime-url-encoding.hxx
index 225215d..34172a4 100644
--- a/web/mime-url-encoding.hxx
+++ b/web/server/mime-url-encoding.hxx
@@ -1,9 +1,8 @@
-// file : web/mime-url-encoding.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/mime-url-encoding.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_MIME_URL_ENCODING_HXX
-#define WEB_MIME_URL_ENCODING_HXX
+#ifndef WEB_SERVER_MIME_URL_ENCODING_HXX
+#define WEB_SERVER_MIME_URL_ENCODING_HXX
#include <string>
@@ -30,4 +29,4 @@ namespace web
bool query = true);
}
-#endif // WEB_MIME_URL_ENCODING_HXX
+#endif // WEB_SERVER_MIME_URL_ENCODING_HXX
diff --git a/web/module.hxx b/web/server/module.hxx
index 73ffcaf..20f6217 100644
--- a/web/module.hxx
+++ b/web/server/module.hxx
@@ -1,22 +1,22 @@
-// file : web/module.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/server/module.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_MODULE_HXX
-#define WEB_MODULE_HXX
+#ifndef WEB_SERVER_MODULE_HXX
+#define WEB_SERVER_MODULE_HXX
#include <map>
#include <string>
#include <vector>
#include <iosfwd>
#include <chrono>
+#include <memory> // enable_shared_from_this
#include <cstdint> // uint16_t
#include <cstddef> // size_t
#include <utility> // move()
#include <stdexcept> // runtime_error
-#include <libbutl/path.mxx>
-#include <libbutl/optional.mxx>
+#include <libbutl/path.hxx>
+#include <libbutl/optional.hxx>
namespace web
{
@@ -237,7 +237,7 @@ namespace web
// directories (e.g., apache/) if you need to see the code that
// does this.
//
- class handler
+ class handler: public std::enable_shared_from_this<handler>
{
public:
virtual
@@ -297,4 +297,4 @@ namespace web
};
}
-#endif // WEB_MODULE_HXX
+#endif // WEB_SERVER_MODULE_HXX
diff --git a/web/version.hxx.in b/web/version.hxx.in
deleted file mode 100644
index 10851f4..0000000
--- a/web/version.hxx.in
+++ /dev/null
@@ -1,12 +0,0 @@
-// file : web/version.hxx.in -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef WEB_VERSION_HXX_IN
-#define WEB_VERSION_HXX_IN
-
-#include <libstudxml/version.hxx>
-
-$libstudxml.check(LIBSTUDXML_VERSION, LIBSTUDXML_SNAPSHOT)$
-
-#endif // WEB_VERSION_HXX_IN
diff --git a/web/.gitignore b/web/xhtml/.gitignore
index 426db9e..426db9e 100644
--- a/web/.gitignore
+++ b/web/xhtml/.gitignore
diff --git a/web/xhtml/buildfile b/web/xhtml/buildfile
new file mode 100644
index 0000000..06dd34c
--- /dev/null
+++ b/web/xhtml/buildfile
@@ -0,0 +1,10 @@
+# file : web/xhtml/buildfile
+# license : MIT; see accompanying LICENSE file
+
+import libs = libstudxml%lib{studxml}
+
+./: {libue libus}{xhtml}: {hxx ixx txx cxx}{** -version} {hxx}{version} $libs
+
+hxx{version}: in{version} $src_root/manifest
+
+{hxx ixx txx}{*}: install = false
diff --git a/web/xhtml-fragment.cxx b/web/xhtml/fragment.cxx
index fe8a0a7..843db82 100644
--- a/web/xhtml-fragment.cxx
+++ b/web/xhtml/fragment.cxx
@@ -1,8 +1,7 @@
-// file : web/xhtml-fragment.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/xhtml/fragment.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <web/xhtml-fragment.hxx>
+#include <web/xhtml/fragment.hxx>
#include <string>
#include <cassert>
@@ -10,7 +9,7 @@
#include <libstudxml/parser.hxx>
#include <libstudxml/serializer.hxx>
-#include <web/xhtml.hxx>
+#include <web/xhtml/serialization.hxx>
using namespace std;
using namespace xml;
diff --git a/web/xhtml-fragment.hxx b/web/xhtml/fragment.hxx
index fd41967..eab4335 100644
--- a/web/xhtml-fragment.hxx
+++ b/web/xhtml/fragment.hxx
@@ -1,5 +1,4 @@
-// file : web/xhtml-fragment.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/xhtml/fragment.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
#ifndef WEB_XHTML_FRAGMENT_HXX
diff --git a/web/xhtml.hxx b/web/xhtml/serialization.hxx
index 6d35e49..03e72ff 100644
--- a/web/xhtml.hxx
+++ b/web/xhtml/serialization.hxx
@@ -1,13 +1,12 @@
-// file : web/xhtml.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
+// file : web/xhtml/serialization.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef WEB_XHTML_HXX
-#define WEB_XHTML_HXX
+#ifndef WEB_XHTML_SERIALIZATION_HXX
+#define WEB_XHTML_SERIALIZATION_HXX
#include <libstudxml/serializer.hxx>
-#include <web/version.hxx>
+#include <web/xhtml/version.hxx>
namespace web
{
@@ -356,4 +355,4 @@ namespace web
}
}
-#endif // WEB_XHTML_HXX
+#endif // WEB_XHTML_SERIALIZATION_HXX
diff --git a/web/xhtml/version.hxx.in b/web/xhtml/version.hxx.in
new file mode 100644
index 0000000..fe3e4e5
--- /dev/null
+++ b/web/xhtml/version.hxx.in
@@ -0,0 +1,11 @@
+// file : web/xhtml/version.hxx.in -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef WEB_XHTML_VERSION_HXX_IN
+#define WEB_XHTML_VERSION_HXX_IN
+
+#include <libstudxml/version.hxx>
+
+$libstudxml.check(LIBSTUDXML_VERSION, LIBSTUDXML_SNAPSHOT)$
+
+#endif // WEB_XHTML_VERSION_HXX_IN
diff --git a/www/buildfile b/www/buildfile
index 6c2f15f..2a66c93 100644
--- a/www/buildfile
+++ b/www/buildfile
@@ -1,5 +1,4 @@
# file : www/buildfile
-# copyright : Copyright (c) 2014-2019 Code Synthesis Ltd
# license : MIT; see accompanying LICENSE file
css{*} xhtml{*}: install = data/www/
diff --git a/www/builds-body.css b/www/builds-body.css
index 6c27b09..b5275c3 100644
--- a/www/builds-body.css
+++ b/www/builds-body.css
@@ -37,16 +37,17 @@
.build th, #filter th
{
- width: 7.0em;
+ width: 7.4em;
}
.build tr.name td .value,
.build tr.version td .value,
.build tr.toolchain td .value,
-.build tr.config td .value,
-.build tr.machine td .value,
.build tr.target td .value,
+.build tr.tgt-config td .value,
+.build tr.pkg-config td .value,
.build tr.timestamp td .value,
+.build tr.login td .value,
.build tr.result td .value,
.build tr.tenant td .value
{
diff --git a/www/ci.xhtml b/www/ci.xhtml
index 185f08b..573cca7 100644
--- a/www/ci.xhtml
+++ b/www/ci.xhtml
@@ -13,6 +13,10 @@
<th>package</th>
<td><input type="text" name="package"/></td>
</tr>
+ <tr>
+ <th>interactive</th>
+ <td><input type="text" name="interactive"/></td>
+ </tr>
</tbody>
</table>
<table class="form-table">
diff --git a/www/package-details-body.css b/www/package-details-body.css
index 940b493..1083c54 100644
--- a/www/package-details-body.css
+++ b/www/package-details-body.css
@@ -184,7 +184,6 @@ table.version th {width: 7.6em;}
table.version tr.version td .value,
table.version tr.priority td .value,
-table.version tr.repository td .value,
table.version tr.depends td .value,
table.version tr.requires td .value
{
diff --git a/www/package-version-details-body.css b/www/package-version-details-body.css
index 772f9eb..1c41ed5 100644
--- a/www/package-version-details-body.css
+++ b/www/package-version-details-body.css
@@ -145,7 +145,6 @@ h1, h2, h3
#version tr.version td .value,
#version tr.priority td .value,
-#version tr.repository td .value
{
/* <code> style. */
font-family: monospace;
@@ -244,10 +243,15 @@ h1, h2, h3
font-size: 0.94em;
}
+/*
+ * Tests, examples, and benchmarks tables.
+ */
#tests {margin-top: .4em; margin-bottom: 1em;}
+#tests th {width: 2.8em; text-align: center;}
+#tests th:after{content: "";}
#tests tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);}
-#tests td {margin-left: 2.8em; padding-left: .4em;}
+#tests td {padding-left: .4em;}
#tests tr.tests td .value
{
@@ -257,9 +261,11 @@ h1, h2, h3
}
#examples {margin-top: .4em; margin-bottom: 1em;}
+#examples th {width: 2.8em; text-align: center;}
+#examples th:after{content: "";}
#examples tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);}
-#examples td {margin-left: 2.8em; padding-left: .4em;}
+#examples td {padding-left: .4em;}
#examples tr.examples td .value
{
@@ -269,9 +275,11 @@ h1, h2, h3
}
#benchmarks {margin-top: .4em; margin-bottom: 1em;}
+#benchmarks th {width: 2.8em; text-align: center;}
+#benchmarks th:after{content: "";}
#benchmarks tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);}
-#benchmarks td {margin-left: 2.8em; padding-left: .4em;}
+#benchmarks td {padding-left: .4em;}
#benchmarks tr.benchmarks td .value
{
@@ -281,6 +289,54 @@ h1, h2, h3
}
/*
+ * Binaries.
+ */
+#binaries
+{
+ width: calc(100% + .8rem);
+ margin-left: -.4rem;
+ border: none;
+ border-spacing: 0 0;
+
+ margin-top: .4em;
+ margin-bottom: 1em;
+ border-collapse: collapse;
+}
+
+#binaries tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);}
+
+#binaries td
+{
+ padding: .08em .4rem;
+}
+
+#binaries td:last-child {width: 100%;}
+
+#binaries td .value
+{
+ display: inline-block;
+ white-space: nowrap;
+
+ /* <code> style. */
+ font-family: monospace;
+ font-size: 0.94em;
+}
+
+/* Re-styling for full page variant. */
+
+.full #binaries td
+{
+ vertical-align: top;
+}
+
+.full #binaries td .value
+{
+ margin-right: 1em;
+
+ white-space: normal;
+}
+
+/*
* Builds.
*/
#builds {margin-bottom: 1em;}
@@ -297,12 +353,15 @@ h1, h2, h3
.build th
{
- width: 7.0em;
+ width: 7.4em;
}
.build tr.toolchain td .value,
-.build tr.config td .value,
+.build tr.target td .value,
+.build tr.tgt-config td .value,
+.build tr.pkg-config td .value,
.build tr.timestamp td .value,
+.build tr.login td .value,
.build tr.result td .value
{
/* <code> style. */
@@ -319,13 +378,51 @@ h1, h2, h3
.build .abnormal {color: #ff0000;}
/*
- * Changes.
+ * Changes (plain text).
*
* This is a <pre> block that fits lines up to 80 characters long and
* wraps longer ones.
*/
-#changes
+#changes.plain pre
{
font-size: 0.85em;
- margin: .5em 0 .5em 0;
}
+
+/*
+ * Changes (Markdown).
+ *
+ * These are descendants of the <div> block containing the result of
+ * Markdown-to-HTML translation.
+ *
+ * Note that the Markdown code blocks are translated into the
+ * <pre><code>...<code/></pre> element construct.
+ */
+#changes.markdown h1,
+#changes.markdown h2
+{
+ white-space: normal;
+}
+
+/* code-box.css */
+#changes.markdown :not(pre) > code
+{
+ background-color: rgba(0, 0, 0, 0.05);
+ border-radius: 0.2em;
+ padding: .2em .32em .18em .32em;
+}
+
+/* pre-box.css */
+#changes.markdown pre
+{
+ background-color: rgba(0, 0, 0, 0.05);
+ border-radius: 0.2em;
+ padding: .8em .4em .8em .4em;
+ margin: 2em -.4em 2em -.4em; /* Use paddings of #content. */
+}
+
+#changes.markdown pre > code
+{
+ font-size: inherit;
+}
+
+#changes.markdown .error {color: #ff0000;}
diff --git a/www/packages-body.css b/www/packages-body.css
index 79911d4..986308f 100644
--- a/www/packages-body.css
+++ b/www/packages-body.css
@@ -33,7 +33,6 @@
.package tr.name td .value,
.package tr.depends td .value,
-.package tr.requires td .value,
.package tr.tenant td .value
{
/* <code> style. */