aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore6
-rw-r--r--LICENSE2
-rw-r--r--NEWS287
-rw-r--r--bpkg/archive.cxx165
-rw-r--r--bpkg/archive.hxx11
-rw-r--r--bpkg/argument-grouping.cli94
-rw-r--r--bpkg/auth.cxx152
-rw-r--r--bpkg/auth.hxx29
-rw-r--r--bpkg/bpkg.cli31
-rw-r--r--bpkg/bpkg.cxx494
-rw-r--r--bpkg/bpkg.hxx35
-rw-r--r--bpkg/buildfile90
-rw-r--r--bpkg/cfg-create.cli82
-rw-r--r--bpkg/cfg-create.cxx190
-rw-r--r--bpkg/cfg-create.hxx22
-rw-r--r--bpkg/cfg-info.cli103
-rw-r--r--bpkg/cfg-info.cxx129
-rw-r--r--bpkg/cfg-info.hxx18
-rw-r--r--bpkg/cfg-link.cli83
-rw-r--r--bpkg/cfg-link.cxx321
-rw-r--r--bpkg/cfg-link.hxx40
-rw-r--r--bpkg/cfg-unlink.cli81
-rw-r--r--bpkg/cfg-unlink.cxx292
-rw-r--r--bpkg/cfg-unlink.hxx18
-rw-r--r--bpkg/checksum.cxx2
-rw-r--r--bpkg/checksum.hxx6
-rw-r--r--bpkg/common.cli178
-rw-r--r--bpkg/database.cxx1128
-rw-r--r--bpkg/database.hxx675
-rw-r--r--bpkg/diagnostics.cxx8
-rw-r--r--bpkg/diagnostics.hxx62
-rw-r--r--bpkg/fetch-git.cxx725
-rw-r--r--bpkg/fetch-pkg.cxx110
-rw-r--r--bpkg/fetch.cxx501
-rw-r--r--bpkg/fetch.hxx73
-rw-r--r--bpkg/forward.hxx10
-rw-r--r--bpkg/help.cxx2
-rw-r--r--bpkg/manifest-utility.cxx448
-rw-r--r--bpkg/manifest-utility.hxx115
-rwxr-xr-xbpkg/odb.sh11
-rw-r--r--bpkg/options-types.hxx25
-rw-r--r--bpkg/package-configuration.cxx516
-rw-r--r--bpkg/package-configuration.hxx223
-rw-r--r--bpkg/package-query.cxx726
-rw-r--r--bpkg/package-query.hxx281
-rw-r--r--bpkg/package-skeleton.cxx2892
-rw-r--r--bpkg/package-skeleton.hxx400
-rw-r--r--bpkg/package.cxx699
-rw-r--r--bpkg/package.hxx850
-rw-r--r--bpkg/package.ixx19
-rw-r--r--bpkg/package.xml378
-rw-r--r--bpkg/pkg-bindist.cli908
-rw-r--r--bpkg/pkg-bindist.cxx689
-rw-r--r--bpkg/pkg-bindist.hxx27
-rw-r--r--bpkg/pkg-build-collect.cxx8379
-rw-r--r--bpkg/pkg-build-collect.hxx1882
-rw-r--r--bpkg/pkg-build.cli278
-rw-r--r--bpkg/pkg-build.cxx8766
-rw-r--r--bpkg/pkg-checkout.cxx317
-rw-r--r--bpkg/pkg-checkout.hxx74
-rw-r--r--bpkg/pkg-clean.hxx1
-rw-r--r--bpkg/pkg-command.cxx137
-rw-r--r--bpkg/pkg-command.hxx30
-rw-r--r--bpkg/pkg-configure.cxx950
-rw-r--r--bpkg/pkg-configure.hxx179
-rw-r--r--bpkg/pkg-disfigure.cli14
-rw-r--r--bpkg/pkg-disfigure.cxx166
-rw-r--r--bpkg/pkg-disfigure.hxx15
-rw-r--r--bpkg/pkg-drop.cli40
-rw-r--r--bpkg/pkg-drop.cxx366
-rw-r--r--bpkg/pkg-fetch.cxx161
-rw-r--r--bpkg/pkg-fetch.hxx10
-rw-r--r--bpkg/pkg-install.hxx6
-rw-r--r--bpkg/pkg-purge.cxx21
-rw-r--r--bpkg/pkg-purge.hxx4
-rw-r--r--bpkg/pkg-status.cli149
-rw-r--r--bpkg/pkg-status.cxx547
-rw-r--r--bpkg/pkg-test.hxx1
-rw-r--r--bpkg/pkg-uninstall.hxx5
-rw-r--r--bpkg/pkg-unpack.cxx204
-rw-r--r--bpkg/pkg-unpack.hxx10
-rw-r--r--bpkg/pkg-update.hxx7
-rw-r--r--bpkg/pkg-verify.cli17
-rw-r--r--bpkg/pkg-verify.cxx435
-rw-r--r--bpkg/pkg-verify.hxx67
-rw-r--r--bpkg/pointer-traits.hxx58
-rw-r--r--bpkg/rep-add.cxx8
-rw-r--r--bpkg/rep-add.hxx5
-rw-r--r--bpkg/rep-create.cli14
-rw-r--r--bpkg/rep-create.cxx48
-rw-r--r--bpkg/rep-fetch.cxx708
-rw-r--r--bpkg/rep-fetch.hxx34
-rw-r--r--bpkg/rep-info.cli21
-rw-r--r--bpkg/rep-info.cxx31
-rw-r--r--bpkg/rep-list.cxx2
-rw-r--r--bpkg/rep-mask.cxx368
-rw-r--r--bpkg/rep-mask.hxx73
-rw-r--r--bpkg/rep-remove.cxx143
-rw-r--r--bpkg/rep-remove.hxx17
-rw-r--r--bpkg/repository-signing.cli14
-rw-r--r--bpkg/repository-types.cli4
-rw-r--r--bpkg/satisfaction.cxx46
-rw-r--r--bpkg/satisfaction.hxx23
-rw-r--r--bpkg/satisfaction.test.cxx3
-rw-r--r--bpkg/system-package-manager-archive.cxx794
-rw-r--r--bpkg/system-package-manager-archive.hxx55
-rw-r--r--bpkg/system-package-manager-debian.cxx3616
-rw-r--r--bpkg/system-package-manager-debian.hxx271
-rw-r--r--bpkg/system-package-manager-debian.test.cxx386
-rw-r--r--bpkg/system-package-manager-debian.test.testscript1177
-rw-r--r--bpkg/system-package-manager-fedora.cxx4560
-rw-r--r--bpkg/system-package-manager-fedora.hxx372
-rw-r--r--bpkg/system-package-manager-fedora.test.cxx431
-rw-r--r--bpkg/system-package-manager-fedora.test.testscript1410
-rw-r--r--bpkg/system-package-manager.cxx904
-rw-r--r--bpkg/system-package-manager.hxx463
-rw-r--r--bpkg/system-package-manager.test.cxx160
-rw-r--r--bpkg/system-package-manager.test.hxx112
-rw-r--r--bpkg/system-package-manager.test.testscript158
-rw-r--r--bpkg/system-repository.cxx12
-rw-r--r--bpkg/system-repository.hxx21
-rw-r--r--bpkg/types-parsers.cxx151
-rw-r--r--bpkg/types-parsers.hxx58
-rw-r--r--bpkg/types.hxx149
-rw-r--r--bpkg/utility.cxx164
-rw-r--r--bpkg/utility.hxx126
-rw-r--r--bpkg/utility.txx138
-rw-r--r--bpkg/wrapper-traits.hxx2
-rw-r--r--build/root.build16
-rw-r--r--doc/buildfile3
-rwxr-xr-xdoc/cli.sh32
-rw-r--r--doc/manual.cli2302
m---------doc/style0
-rw-r--r--manifest22
-rw-r--r--repositories.manifest8
-rw-r--r--tests/.gitignore6
-rw-r--r--tests/auth.testscript6
-rwxr-xr-xtests/auth/cert17
-rw-r--r--tests/auth/default-cert-fp2
-rw-r--r--tests/auth/default-cert.pem54
-rw-r--r--tests/auth/mismatch-cert.pem56
-rw-r--r--tests/auth/noemail-cert.pem54
-rw-r--r--tests/auth/self-any-cert.pem56
-rw-r--r--tests/auth/self-cert.pem55
-rw-r--r--tests/auth/subdomain-cert.pem56
-rw-r--r--tests/build/root.build5
-rw-r--r--tests/cfg-create.testscript205
-rw-r--r--tests/cfg-info.testscript176
-rw-r--r--tests/cfg-link.testscript290
-rw-r--r--tests/cfg-unlink.testscript275
l---------tests/cfg-unlink/t7a1
-rw-r--r--tests/common.testscript39
-rw-r--r--tests/common/compatibility/t15/libbar-1.0.0.tar.gzbin0 -> 442 bytes
-rw-r--r--tests/common/compatibility/t15/libbaz-1.0.0.tar.gzbin0 -> 442 bytes
-rw-r--r--tests/common/compatibility/t15/libbiz-1.0.0.tar.gzbin0 -> 422 bytes
-rw-r--r--tests/common/compatibility/t15/libfoo-1.0.0.tar.gzbin0 -> 423 bytes
-rw-r--r--tests/common/compatibility/t15/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t11a/bac-1.0.0.tar.gzbin0 -> 472 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bar-0.1.0.tar.gzbin0 -> 462 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bar-1.0.0.tar.gzbin0 -> 454 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bas-1.0.0.tar.gzbin0 -> 465 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bat-1.0.0.tar.gzbin0 -> 452 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bax-0.1.0.tar.gzbin0 -> 423 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bax-1.0.0.tar.gzbin0 -> 494 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/baz-0.1.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/baz-1.0.0.tar.gzbin0 -> 413 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bex-1.0.0.tar.gzbin0 -> 357 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bix-1.0.0.tar.gzbin0 -> 414 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/biz-0.1.0.tar.gzbin0 -> 416 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/biz-1.0.0.tar.gzbin0 -> 449 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/boo-1.0.0.tar.gzbin0 -> 460 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/box-0.1.0.tar.gzbin0 -> 402 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/box-0.2.0.tar.gzbin0 -> 396 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/box-1.0.0.tar.gzbin0 -> 415 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/buc-1.0.0.tar.gzbin0 -> 465 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bus-0.1.0.tar.gzbin0 -> 478 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bus-1.0.0.tar.gzbin0 -> 464 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/bux-1.0.0.tar.gzbin0 -> 454 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/buz-1.0.0.tar.gzbin0 -> 449 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/dex-1.0.0.tar.gzbin0 -> 465 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/dix-1.0.0.tar.gzbin0 -> 472 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/diz-1.0.0.tar.gzbin0 -> 473 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/dox-1.0.0.tar.gzbin0 -> 445 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fex-0.1.0.tar.gzbin0 -> 440 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fex-1.0.0.tar.gzbin0 -> 455 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fix-0.1.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fix-1.0.0.tar.gzbin0 -> 450 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/foo-0.1.0.tar.gzbin0 -> 454 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/foo-0.2.0.tar.gzbin0 -> 464 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/foo-1.0.0.tar.gzbin0 -> 485 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fox-0.1.0.tar.gzbin0 -> 425 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fox-0.2.0.tar.gzbin0 -> 457 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fox-1.0.0.tar.gzbin0 -> 461 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fux-0.1.0.tar.gzbin0 -> 443 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fux-0.1.1.tar.gzbin0 -> 443 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fux-0.2.0.tar.gzbin0 -> 486 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/fux-1.0.0.tar.gzbin0 -> 429 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbar-0.1.0.tar.gzbin0 -> 409 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbar-1.0.0.tar.gzbin0 -> 465 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbaz-0.1.0.tar.gzbin0 -> 412 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbaz-1.0.0.tar.gzbin0 -> 490 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbiz-0.1.0.tar.gzbin0 -> 401 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbiz-1.0.0.tar.gzbin0 -> 409 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbox-0.1.0.tar.gzbin0 -> 411 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libbox-1.0.0.tar.gzbin0 -> 487 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libfoo-0.1.0.tar.gzbin0 -> 411 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/libfoo-1.0.0.tar.gzbin0 -> 488 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t11a/tax-1.0.0.tar.gzbin0 -> 462 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tex-0.1.0.tar.gzbin0 -> 451 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tex-0.2.0.tar.gzbin0 -> 462 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tex-0.3.0.tar.gzbin0 -> 466 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tex-1.0.0.tar.gzbin0 -> 492 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tez-0.1.0.tar.gzbin0 -> 465 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tez-1.0.0.tar.gzbin0 -> 481 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tix-0.1.0.tar.gzbin0 -> 407 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tix-1.0.0.tar.gzbin0 -> 460 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tiz-1.0.0.tar.gzbin0 -> 496 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/toz-0.1.0.tar.gzbin0 -> 406 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/toz-0.2.0.tar.gzbin0 -> 464 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/toz-1.0.0.tar.gzbin0 -> 482 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tux-1.0.0.tar.gzbin0 -> 469 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tuz-1.0.0.tar.gzbin0 -> 440 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tvz-0.1.0.tar.gzbin0 -> 475 bytes
-rw-r--r--tests/common/dependency-alternatives/t11a/tvz-1.0.0.tar.gzbin0 -> 451 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/bar-1.0.0.tar.gzbin0 -> 530 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/baz-1.0.0.tar.gzbin0 -> 527 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/biz-1.0.0.tar.gzbin0 -> 450 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/box-1.0.0.tar.gzbin0 -> 505 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/liba-1.0.0.tar.gzbin0 -> 456 bytes
-rw-r--r--tests/common/dependency-alternatives/t13a/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13b/bar-1.0.0.tar.gzbin0 -> 439 bytes
-rw-r--r--tests/common/dependency-alternatives/t13b/baz-1.0.0.tar.gzbin0 -> 439 bytes
-rw-r--r--tests/common/dependency-alternatives/t13b/biz-1.0.0.tar.gzbin0 -> 399 bytes
-rw-r--r--tests/common/dependency-alternatives/t13b/liba-1.0.0.tar.gzbin0 -> 431 bytes
-rw-r--r--tests/common/dependency-alternatives/t13b/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13c/bar-1.0.0.tar.gzbin0 -> 468 bytes
-rw-r--r--tests/common/dependency-alternatives/t13c/baz-1.0.0.tar.gzbin0 -> 403 bytes
-rw-r--r--tests/common/dependency-alternatives/t13c/liba-1.0.0.tar.gzbin0 -> 401 bytes
-rw-r--r--tests/common/dependency-alternatives/t13c/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13d/bar-1.0.0.tar.gzbin0 -> 539 bytes
-rw-r--r--tests/common/dependency-alternatives/t13d/baz-1.0.0.tar.gzbin0 -> 407 bytes
-rw-r--r--tests/common/dependency-alternatives/t13d/liba-1.0.0.tar.gzbin0 -> 401 bytes
-rw-r--r--tests/common/dependency-alternatives/t13d/libb-1.0.0.tar.gzbin0 -> 347 bytes
-rw-r--r--tests/common/dependency-alternatives/t13d/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13e/bar-1.0.0.tar.gzbin0 -> 443 bytes
-rw-r--r--tests/common/dependency-alternatives/t13e/baz-1.0.0.tar.gzbin0 -> 448 bytes
-rw-r--r--tests/common/dependency-alternatives/t13e/biz-1.0.0.tar.gzbin0 -> 395 bytes
-rw-r--r--tests/common/dependency-alternatives/t13e/liba-1.0.0.tar.gzbin0 -> 422 bytes
-rw-r--r--tests/common/dependency-alternatives/t13e/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13f/bar-1.0.0.tar.gzbin0 -> 511 bytes
-rw-r--r--tests/common/dependency-alternatives/t13f/baz-1.0.0.tar.gzbin0 -> 458 bytes
-rw-r--r--tests/common/dependency-alternatives/t13f/liba-1.0.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t13f/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13g/bar-1.0.0.tar.gzbin0 -> 533 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/baz-1.0.0.tar.gzbin0 -> 497 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/biz-1.0.0.tar.gzbin0 -> 538 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/box-1.0.0.tar.gzbin0 -> 535 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/liba-1.0.0.tar.gzbin0 -> 402 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/libb-1.0.0.tar.gzbin0 -> 347 bytes
-rw-r--r--tests/common/dependency-alternatives/t13g/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13h/bar-1.0.0.tar.gzbin0 -> 492 bytes
-rw-r--r--tests/common/dependency-alternatives/t13h/baz-1.0.0.tar.gzbin0 -> 416 bytes
-rw-r--r--tests/common/dependency-alternatives/t13h/liba-1.0.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t13h/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13i/bar-1.0.0.tar.gzbin0 -> 481 bytes
-rw-r--r--tests/common/dependency-alternatives/t13i/baz-1.0.0.tar.gzbin0 -> 501 bytes
-rw-r--r--tests/common/dependency-alternatives/t13i/liba-1.0.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t13i/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13j/bar-1.0.0.tar.gzbin0 -> 508 bytes
-rw-r--r--tests/common/dependency-alternatives/t13j/baz-1.0.0.tar.gzbin0 -> 398 bytes
-rw-r--r--tests/common/dependency-alternatives/t13j/biz-1.0.0.tar.gzbin0 -> 503 bytes
-rw-r--r--tests/common/dependency-alternatives/t13j/liba-1.0.0.tar.gzbin0 -> 494 bytes
-rw-r--r--tests/common/dependency-alternatives/t13j/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13k/bar-1.0.0.tar.gzbin0 -> 505 bytes
-rw-r--r--tests/common/dependency-alternatives/t13k/baz-1.0.0.tar.gzbin0 -> 417 bytes
-rw-r--r--tests/common/dependency-alternatives/t13k/liba-1.0.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/dependency-alternatives/t13k/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13l/bar-1.0.0.tar.gzbin0 -> 433 bytes
-rw-r--r--tests/common/dependency-alternatives/t13l/baz-1.0.0.tar.gzbin0 -> 439 bytes
-rw-r--r--tests/common/dependency-alternatives/t13l/liba-1.0.0.tar.gzbin0 -> 404 bytes
-rw-r--r--tests/common/dependency-alternatives/t13l/libb-1.0.0.tar.gzbin0 -> 354 bytes
-rw-r--r--tests/common/dependency-alternatives/t13l/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13m/bar-1.0.0.tar.gzbin0 -> 401 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/baz-1.0.0.tar.gzbin0 -> 531 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/bix-1.0.0.tar.gzbin0 -> 407 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/biz-1.0.0.tar.gzbin0 -> 411 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/box-1.0.0.tar.gzbin0 -> 414 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/liba-1.0.0.tar.gzbin0 -> 422 bytes
-rw-r--r--tests/common/dependency-alternatives/t13m/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13n/bar-1.0.0.tar.gzbin0 -> 482 bytes
-rw-r--r--tests/common/dependency-alternatives/t13n/liba-1.0.0.tar.gzbin0 -> 351 bytes
-rw-r--r--tests/common/dependency-alternatives/t13n/libb-1.0.0.tar.gzbin0 -> 354 bytes
-rw-r--r--tests/common/dependency-alternatives/t13n/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t13o/bar-1.0.0.tar.gzbin0 -> 497 bytes
-rw-r--r--tests/common/dependency-alternatives/t13o/baz-1.0.0.tar.gzbin0 -> 398 bytes
-rw-r--r--tests/common/dependency-alternatives/t13o/bix-1.0.0.tar.gzbin0 -> 406 bytes
-rw-r--r--tests/common/dependency-alternatives/t13o/biz-1.0.0.tar.gzbin0 -> 407 bytes
-rw-r--r--tests/common/dependency-alternatives/t13o/liba-1.0.0.tar.gzbin0 -> 351 bytes
-rw-r--r--tests/common/dependency-alternatives/t13o/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t8a/bar-1.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/bax-0.1.0.tar.gzbin0 -> 437 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/bax-1.0.0.tar.gzbin0 -> 446 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/baz-1.0.0.tar.gzbin0 -> 357 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/bix-0.1.0.tar.gzbin0 -> 350 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/bix-1.0.0.tar.gzbin0 -> 438 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/box-1.0.0.tar.gzbin0 -> 479 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/bux-1.0.0.tar.gzbin0 -> 359 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/dax-1.0.0.tar.gzbin0 -> 470 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/dix-0.1.0.tar.gzbin0 -> 348 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/dix-1.0.0.tar.gzbin0 -> 395 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/dox-1.0.0.tar.gzbin0 -> 351 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/dux-1.0.0.tar.gzbin0 -> 353 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/fax-1.0.0.tar.gzbin0 -> 584 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/fix-1.0.0.tar.gzbin0 -> 438 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/foo-1.0.0.tar.gzbin0 -> 373 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/fox-1.0.0.tar.gzbin0 -> 452 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/foz-1.0.0.tar.gzbin0 -> 353 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/fux-1.0.0.tar.gzbin0 -> 466 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/fuz-1.0.0.tar.gzbin0 -> 412 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbar-1.0.0.tar.gzbin0 -> 411 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbaz-1.0.0.tar.gzbin0 -> 416 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbaz-1.1.0.tar.gzbin0 -> 415 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbiz-0.1.0.tar.gzbin0 -> 350 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbiz-1.0.0.tar.gzbin0 -> 347 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbox-0.1.0.tar.gzbin0 -> 352 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbox-0.1.1.tar.gzbin0 -> 349 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libbox-1.0.0.tar.gzbin0 -> 348 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libfoo-1.0.0.tar.gzbin0 -> 407 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libfoo-2.0.0.tar.gzbin0 -> 406 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/libfox-1.0.0.tar.gzbin0 -> 398 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/repositories.manifest1
-rw-r--r--tests/common/dependency-alternatives/t8a/tax-1.0.0.tar.gzbin0 -> 373 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tex-1.0.0.tar.gzbin0 -> 470 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tix-1.0.0.tar.gzbin0 -> 425 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tox-1.0.0.tar.gzbin0 -> 504 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tpx-1.0.0.tar.gzbin0 -> 496 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tux-1.0.0.tar.gzbin0 -> 431 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/tvx-1.0.0.tar.gzbin0 -> 482 bytes
-rw-r--r--tests/common/dependency-alternatives/t8a/twx-1.0.0.tar.gzbin0 -> 431 bytes
-rw-r--r--tests/common/foo-1.tar.gzbin261 -> 332 bytes
-rw-r--r--tests/common/git/README2
-rwxr-xr-xtests/common/git/init42
-rw-r--r--tests/common/git/state0/libbar.tarbin184320 -> 194560 bytes
-rw-r--r--tests/common/git/state0/libfoo.tarbin450560 -> 491520 bytes
-rw-r--r--tests/common/git/state0/libfox.tarbin245760 -> 276480 bytes
-rw-r--r--tests/common/git/state0/links.tarbin276480 -> 296960 bytes
-rw-r--r--tests/common/git/state0/style-basic.tarbin81920 -> 81920 bytes
-rw-r--r--tests/common/git/state0/style.tarbin143360 -> 153600 bytes
-rw-r--r--tests/common/git/state1/libbaz.tarbin61440 -> 71680 bytes
-rw-r--r--tests/common/git/state1/libfoo.tarbin512000 -> 552960 bytes
-rw-r--r--tests/common/git/state1/libfox.tarbin245760 -> 276480 bytes
-rw-r--r--tests/common/git/state1/style-basic.tarbin81920 -> 81920 bytes
-rw-r--r--tests/common/git/state1/style.tarbin143360 -> 163840 bytes
-rw-r--r--tests/common/hello/libhello-1.0.0.tar.gzbin2448 -> 2458 bytes
-rw-r--r--tests/common/libhello-1.0.0/manifest2
-rw-r--r--tests/common/linked/t7a/foo-1.0.0.tar.gzbin0 -> 476 bytes
-rw-r--r--tests/common/linked/t7a/libbar-1.0.0.tar.gzbin0 -> 459 bytes
-rw-r--r--tests/common/linked/t7a/libbaz-1.0.0.tar.gzbin0 -> 466 bytes
-rw-r--r--tests/common/linked/t7a/libbix-1.0.0.tar.gzbin0 -> 371 bytes
-rw-r--r--tests/common/linked/t7a/libbiz-1.0.0.tar.gzbin0 -> 431 bytes
-rw-r--r--tests/common/linked/t7a/libbox-1.0.0.tar.gzbin0 -> 459 bytes
-rw-r--r--tests/common/linked/t7a/libbuild2-bar-1.0.0.tar.gzbin0 -> 406 bytes
-rw-r--r--tests/common/linked/t7a/libbuild2-foo-1.0.0.tar.gzbin0 -> 433 bytes
-rw-r--r--tests/common/linked/t7a/libbuz-1.0.0.tar.gzbin0 -> 414 bytes
-rw-r--r--tests/common/linked/t7a/libfax-1.0.0.tar.gzbin0 -> 409 bytes
-rw-r--r--tests/common/linked/t7a/libfix-1.0.0.tar.gzbin0 -> 424 bytes
-rw-r--r--tests/common/linked/t7a/repositories.manifest1
-rw-r--r--tests/common/linked/t7b/foo-1.1.0.tar.gzbin0 -> 438 bytes
-rw-r--r--tests/common/linked/t7b/libbar-1.1.0.tar.gzbin0 -> 457 bytes
-rw-r--r--tests/common/linked/t7b/libbaz-1.1.0.tar.gzbin0 -> 413 bytes
-rw-r--r--tests/common/linked/t7b/libbox-1.1.0.tar.gzbin0 -> 444 bytes
-rw-r--r--tests/common/linked/t7b/repositories.manifest4
-rw-r--r--tests/common/prereq-cycle/extra/libbar-1.1.0+1.tar.gzbin243 -> 321 bytes
-rw-r--r--tests/common/prereq-cycle/math/libbar-1.0.0.tar.gzbin241 -> 319 bytes
-rw-r--r--tests/common/prereq-cycle/stable/libfoo-1.0.0.tar.gzbin240 -> 320 bytes
-rw-r--r--tests/common/satisfy/libbar-0.1.0.tar.gzbin0 -> 406 bytes
-rw-r--r--tests/common/satisfy/libbar-1.0.0.tar.gzbin356 -> 418 bytes
-rw-r--r--tests/common/satisfy/libbar-1.1.0.tar.gzbin368 -> 445 bytes
-rw-r--r--tests/common/satisfy/libbar-1.2.0.tar.gzbin348 -> 485 bytes
-rw-r--r--tests/common/satisfy/libbar-2.1.0.tar.gzbin0 -> 414 bytes
-rw-r--r--tests/common/satisfy/libbax-1.0.0.tar.gzbin0 -> 347 bytes
-rw-r--r--tests/common/satisfy/libbax-2.0.0.tar.gzbin0 -> 350 bytes
-rw-r--r--tests/common/satisfy/libbaz-1.1.0.tar.gzbin363 -> 400 bytes
-rw-r--r--tests/common/satisfy/libbaz-1.2.0.tar.gzbin0 -> 386 bytes
-rw-r--r--tests/common/satisfy/libbaz-2.0.0.tar.gzbin0 -> 388 bytes
-rw-r--r--tests/common/satisfy/libbaz-2.1.0.tar.gzbin0 -> 385 bytes
-rw-r--r--tests/common/satisfy/libbix-1.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/libbix-2.0.0.tar.gzbin0 -> 363 bytes
-rw-r--r--tests/common/satisfy/libbox-1.0.0.tar.gzbin0 -> 361 bytes
-rw-r--r--tests/common/satisfy/libbox-1.2.0.tar.gzbin0 -> 421 bytes
-rw-r--r--tests/common/satisfy/libbox-2.0.0.tar.gzbin0 -> 367 bytes
-rw-r--r--tests/common/satisfy/libbux-1.0.0.tar.gzbin0 -> 351 bytes
-rw-r--r--tests/common/satisfy/libfix-1.0.0.tar.gzbin0 -> 364 bytes
-rw-r--r--tests/common/satisfy/libfoo-0.1.0.tar.gzbin0 -> 355 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.0.0.tar.gzbin369 -> 423 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.1.0+1.tar.gzbin353 -> 349 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.1.0+2.tar.gzbin0 -> 348 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.1.0+3.tar.gzbin0 -> 347 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.1.0.tar.gzbin349 -> 400 bytes
-rw-r--r--tests/common/satisfy/libfoo-1.1.1.tar.gzbin0 -> 404 bytes
-rw-r--r--tests/common/satisfy/libfoo-2.0.0.tar.gzbin0 -> 368 bytes
-rw-r--r--tests/common/satisfy/libfoo-3.0.0.tar.gzbin0 -> 416 bytes
-rw-r--r--tests/common/satisfy/libfox-1.1.0.tar.gzbin0 -> 370 bytes
-rw-r--r--tests/common/satisfy/libfox-2.0.0.tar.gzbin0 -> 367 bytes
-rw-r--r--tests/common/satisfy/libfox-2.1.0.tar.gzbin0 -> 374 bytes
-rw-r--r--tests/common/satisfy/libfox-3.0.0.tar.gzbin0 -> 379 bytes
-rw-r--r--tests/common/satisfy/t10/libbar-baz-1.0.0.tar.gzbin0 -> 399 bytes
-rw-r--r--tests/common/satisfy/t10/libbar-foo-1.0.0.tar.gzbin0 -> 401 bytes
-rw-r--r--tests/common/satisfy/t10/libbar-tests-1.0.0.tar.gzbin0 -> 632 bytes
-rw-r--r--tests/common/satisfy/t10/libfoo-bar-1.0.0.tar.gzbin0 -> 376 bytes
-rw-r--r--tests/common/satisfy/t10/libfoo-baz-1.0.0.tar.gzbin0 -> 376 bytes
-rw-r--r--tests/common/satisfy/t10/libfoo-tests-1.0.0.tar.gzbin0 -> 492 bytes
l---------tests/common/satisfy/t10/repositories.manifest1
-rw-r--r--tests/common/satisfy/t12a/libbar-0.1.0.tar.gzbin0 -> 386 bytes
-rw-r--r--tests/common/satisfy/t12a/libbaz-1.0.0.tar.gzbin0 -> 442 bytes
-rw-r--r--tests/common/satisfy/t12a/repositories.manifest1
-rw-r--r--tests/common/satisfy/t12b/bar-1.0.0.tar.gzbin0 -> 372 bytes
-rw-r--r--tests/common/satisfy/t12b/baz-0.1.0.tar.gzbin0 -> 359 bytes
-rw-r--r--tests/common/satisfy/t12b/baz-1.0.0.tar.gzbin0 -> 366 bytes
-rw-r--r--tests/common/satisfy/t12b/foo-0.1.0.tar.gzbin0 -> 357 bytes
-rw-r--r--tests/common/satisfy/t12b/foo-1.0.0.tar.gzbin0 -> 368 bytes
-rw-r--r--tests/common/satisfy/t12b/libbar-1.0.0.tar.gzbin0 -> 393 bytes
-rw-r--r--tests/common/satisfy/t12b/libbaz-0.1.0.tar.gzbin0 -> 443 bytes
-rw-r--r--tests/common/satisfy/t12b/repositories.manifest4
l---------tests/common/satisfy/t14a/libfoo-1.0.0.tar.gz1
l---------tests/common/satisfy/t14a/repositories.manifest1
l---------tests/common/satisfy/t14b/libfoo-1.1.0.tar.gz1
l---------tests/common/satisfy/t14b/repositories.manifest1
l---------tests/common/satisfy/t14c/libfoo-1.1.0+1.tar.gz1
l---------tests/common/satisfy/t14c/repositories.manifest1
l---------tests/common/satisfy/t14d/libfoo-1.1.0+2.tar.gz1
l---------tests/common/satisfy/t14d/repositories.manifest1
l---------tests/common/satisfy/t14e/libfoo-1.1.0+3.tar.gz1
l---------tests/common/satisfy/t14e/repositories.manifest1
l---------tests/common/satisfy/t14f/libfoo-1.1.1.tar.gz1
l---------tests/common/satisfy/t14f/repositories.manifest1
l---------tests/common/satisfy/t14i/libfoo-1.2.0.tar.gz1
l---------tests/common/satisfy/t14i/repositories.manifest1
l---------tests/common/satisfy/t2/libfoo-0.1.0.tar.gz1
l---------tests/common/satisfy/t4f/libbar-1.2.0.tar.gz1
l---------tests/common/satisfy/t4f/libbar-2.1.0.tar.gz1
l---------tests/common/satisfy/t4f/libbax-1.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbax-2.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbix-1.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbix-2.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbox-1.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbox-2.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libbux-1.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libfix-1.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libfoo-2.0.0.tar.gz1
l---------tests/common/satisfy/t4f/libfox-1.1.0.tar.gz1
l---------tests/common/satisfy/t4f/libfox-2.0.0.tar.gz1
-rw-r--r--tests/common/satisfy/t4f/repositories.manifest1
l---------tests/common/satisfy/t4i/libbar-0.1.0.tar.gz1
l---------tests/common/satisfy/t4i/libbaz-2.0.0.tar.gz1
-rw-r--r--tests/common/satisfy/t4i/repositories.manifest1
l---------tests/common/satisfy/t4j/libbar-0.1.0.tar.gz1
l---------tests/common/satisfy/t4j/libbar-1.2.0.tar.gz1
l---------tests/common/satisfy/t4j/libbaz-1.2.0.tar.gz1
l---------tests/common/satisfy/t4j/libbaz-2.1.0.tar.gz1
l---------tests/common/satisfy/t4j/libfix-1.0.0.tar.gz1
l---------tests/common/satisfy/t4j/libfoo-3.0.0.tar.gz1
l---------tests/common/satisfy/t4j/libfox-0.0.1.tar.gz1
l---------tests/common/satisfy/t4j/libfox-2.1.0.tar.gz1
l---------tests/common/satisfy/t4j/libfox-3.0.0.tar.gz1
-rw-r--r--tests/common/satisfy/t4j/repositories.manifest1
-rw-r--r--tests/common/satisfy/t4k/libbar-1.0.0.tar.gzbin0 -> 364 bytes
-rw-r--r--tests/common/satisfy/t4k/libbaz-1.0.0.tar.gzbin0 -> 355 bytes
-rw-r--r--tests/common/satisfy/t4k/libfax-1.0.0.tar.gzbin0 -> 365 bytes
-rw-r--r--tests/common/satisfy/t4k/libfax-2.0.0.tar.gzbin0 -> 364 bytes
-rw-r--r--tests/common/satisfy/t4k/libfaz-1.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/t4k/libfaz-2.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/t4k/libfex-1.0.0.tar.gzbin0 -> 362 bytes
-rw-r--r--tests/common/satisfy/t4k/libfex-2.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/t4k/libfix-1.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/t4k/libfix-2.0.0.tar.gzbin0 -> 361 bytes
-rw-r--r--tests/common/satisfy/t4k/libfoo-1.0.0.tar.gzbin0 -> 359 bytes
-rw-r--r--tests/common/satisfy/t4k/libfoo-2.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/common/satisfy/t4k/libfox-1.0.0.tar.gzbin0 -> 361 bytes
-rw-r--r--tests/common/satisfy/t4k/libfox-1.2.0.tar.gzbin0 -> 376 bytes
-rw-r--r--tests/common/satisfy/t4k/libfox-2.0.0.tar.gzbin0 -> 362 bytes
-rw-r--r--tests/common/satisfy/t4k/libfux-1.0.0.tar.gzbin0 -> 371 bytes
-rw-r--r--tests/common/satisfy/t4k/libfux-2.0.0.tar.gzbin0 -> 371 bytes
-rw-r--r--tests/common/satisfy/t4k/libfuz-1.0.0.tar.gzbin0 -> 349 bytes
-rw-r--r--tests/common/satisfy/t4k/libfuz-2.0.0.tar.gzbin0 -> 348 bytes
-rw-r--r--tests/common/satisfy/t4k/repositories.manifest1
l---------tests/common/satisfy/t5/libbox-1.2.0.tar.gz1
-rw-r--r--tests/common/satisfy/t9/foo-1.0.0.tar.gzbin0 -> 363 bytes
-rw-r--r--tests/common/satisfy/t9/libbar-1.0.0.tar.gzbin0 -> 353 bytes
-rw-r--r--tests/common/satisfy/t9/libbaz-1.0.0.tar.gzbin0 -> 362 bytes
-rw-r--r--tests/common/satisfy/t9/libbox-1.0.0.tar.gzbin0 -> 364 bytes
l---------tests/common/satisfy/t9/repositories.manifest1
-rw-r--r--tests/config.testscript7
-rw-r--r--tests/pkg-build.testscript30706
l---------tests/pkg-build/libbar-0.0.3.tar.gz1
l---------tests/pkg-build/libbar-1.1.0.tar.gz1
l---------tests/pkg-build/libbar-1.2.0.tar.gz1
l---------tests/pkg-build/t101
l---------tests/pkg-build/t11a1
l---------tests/pkg-build/t12a1
l---------tests/pkg-build/t12b1
l---------tests/pkg-build/t13a1
l---------tests/pkg-build/t13b1
l---------tests/pkg-build/t13c1
l---------tests/pkg-build/t13d1
l---------tests/pkg-build/t13e1
l---------tests/pkg-build/t13f1
l---------tests/pkg-build/t13g1
l---------tests/pkg-build/t13h1
l---------tests/pkg-build/t13i1
l---------tests/pkg-build/t13j1
l---------tests/pkg-build/t13k1
l---------tests/pkg-build/t13l1
l---------tests/pkg-build/t13m1
l---------tests/pkg-build/t13n1
l---------tests/pkg-build/t13o1
l---------tests/pkg-build/t14a1
l---------tests/pkg-build/t14b1
l---------tests/pkg-build/t14c1
l---------tests/pkg-build/t14d1
l---------tests/pkg-build/t14e1
l---------tests/pkg-build/t14f1
l---------tests/pkg-build/t14i1
l---------tests/pkg-build/t151
l---------tests/pkg-build/t4f1
l---------tests/pkg-build/t4i1
l---------tests/pkg-build/t4j1
l---------tests/pkg-build/t4k1
l---------tests/pkg-build/t7a1
l---------tests/pkg-build/t7b1
l---------tests/pkg-build/t8a1
l---------tests/pkg-build/t91
-rw-r--r--tests/pkg-checkout.testscript24
-rw-r--r--tests/pkg-clean.testscript2
-rw-r--r--tests/pkg-configure.testscript85
l---------tests/pkg-configure/t8a1
-rw-r--r--tests/pkg-drop.testscript419
l---------tests/pkg-drop/t7a1
-rw-r--r--tests/pkg-fetch.testscript9
-rw-r--r--tests/pkg-status.testscript339
-rw-r--r--tests/pkg-status/extra/libbar-1.1.0+1.tar.gzbin243 -> 322 bytes
-rw-r--r--tests/pkg-status/stable/libbar-1.0.0.tar.gzbin241 -> 323 bytes
-rw-r--r--tests/pkg-status/stable/libfoo-1.0.0.tar.gzbin240 -> 323 bytes
-rw-r--r--tests/pkg-status/testing/libbar-1.0.0+1.tar.gzbin243 -> 363 bytes
-rw-r--r--tests/pkg-status/testing/libbar-1.1.0.tar.gzbin242 -> 373 bytes
-rw-r--r--tests/pkg-status/testing/libbaz-1.0.0.tar.gzbin0 -> 360 bytes
-rw-r--r--tests/pkg-status/unstable/libbar-2.0.0.tar.gzbin245 -> 322 bytes
-rw-r--r--tests/pkg-system.testscript64
-rw-r--r--tests/pkg-system/libbar-0+1.tar.gzbin239 -> 319 bytes
-rw-r--r--tests/pkg-update.testscript2
-rw-r--r--tests/pkg-verify.testscript61
-rw-r--r--tests/pkg-verify/foo-2.tar.gzbin437 -> 509 bytes
-rw-r--r--tests/pkg-verify/foo-3.tar.gzbin0 -> 412 bytes
l---------tests/pkg-verify/libbaz-1.0.0.tar.gz1
-rw-r--r--tests/remote-git.testscript9
-rw-r--r--tests/rep-auth.testscript18
-rw-r--r--tests/rep-auth/expired/packages.manifest2
-rw-r--r--tests/rep-auth/expired/repositories.manifest2
-rw-r--r--tests/rep-auth/expired/signature.manifest22
-rw-r--r--tests/rep-create.testscript44
-rw-r--r--tests/rep-create/stable/bar-1.tar.gzbin269 -> 345 bytes
-rw-r--r--tests/rep-create/testing/foo-2.tar.gzbin277 -> 356 bytes
-rw-r--r--tests/rep-fetch-git-refname.testscript6
-rw-r--r--tests/rep-fetch.testscript11
-rw-r--r--tests/rep-fetch/no-cycle/extra/libbar-1.1.0+1.tar.gzbin243 -> 323 bytes
-rw-r--r--tests/rep-fetch/no-cycle/math/libbar-1.0.0.tar.gzbin241 -> 324 bytes
-rw-r--r--tests/rep-fetch/no-cycle/stable/libfoo-1.0.0.tar.gzbin240 -> 320 bytes
-rw-r--r--tests/rep-info.testscript115
l---------tests/rep-info/t151
-rw-r--r--tests/rep-list.testscript2
-rw-r--r--tests/rep-list/extra/libbar-1.1.0+1.tar.gzbin243 -> 323 bytes
-rw-r--r--tests/rep-list/math/libbar-1.0.0.tar.gzbin241 -> 324 bytes
-rw-r--r--tests/rep-list/stable/libfoo-1.0.0.tar.gzbin240 -> 321 bytes
-rw-r--r--tests/rep-list/testing/libbar-2.0.0.tar.gzbin245 -> 323 bytes
-rw-r--r--tests/rep-remove.testscript2
-rw-r--r--tests/rep-remove/alpha/libbar-2.0.0.tar.gzbin245 -> 323 bytes
l---------[-rw-r--r--]tests/rep-remove/testing/libbar-2.0.0.tar.gzbin245 -> 28 bytes
578 files changed, 85985 insertions, 5874 deletions
diff --git a/.gitignore b/.gitignore
index c3de2e7..5046596 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/LICENSE b/LICENSE
index a83cdb1..5a25163 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2021 the build2 authors (see the AUTHORS and LEGAL files).
+Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files).
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/NEWS b/NEWS
index c951450..24f2673 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,290 @@
+Version 0.16.0
+
+ * System package manager query/installation support for Debian and Fedora
+ (and alike).
+
+ The pkg-build command will now query (unless --sys-no-query is specified)
+ the system package manager on Debian (and alike, such as Ubuntu) and
+ Fedora (and alike, such as RHEL) for versions of packages that are
+ specified as coming from the system (the sys: scheme). For example, if
+ running the following command on one of these distributions:
+
+ bpkg build hello ?sys:libsqlite3
+
+ Then pkg-build will query the system package manager for the installed
+ version of libsqlite3 and fail if none is present.
+
+ Additionally, if --sys-install is specified, pkg-build will attempt to
+ install such packages if not present but available from the system package
+ repository.
+
+ Other relevant options include --sys-yes, --sys-no-fetch, --sys-no-stub,
+ and --sys-sudo. See bpkg-pkg-build(1) for details.
+
+ See also the `*-{name, version, to-downstream-version}` package manifest
+ values in the manual for details on the bpkg to distribution package name
+ and version mapping.
+
+ * Binary distribution package generation support for Debian and Fedora (and
+ alike).
+
+ The new pkg-bindist command can be used to automatically generate binary
+ distribution packages from bpkg packages for Debian (and alike, such as
+ Ubuntu), Fedora (and alike, such as RHEL), and for other operating systems
+ as installation archives. For Debian and Fedora, dependencies can be
+ satisfied with system packages, bpkg packages, or bundled. See
+ bpkg-pkg-bindist(1) for details.
+
+ * Package build configuration support (*-build-config manifest values).
+
+ A package can now customize in its manifest the build configuration used
+ by automated build bots. This includes specifying configuration variable
+ values, forcing specific versions of dependencies, satisfying dependencies
+ with system packages, and enabling/disabling build bot steps. For example:
+
+ # Test with extras enabled.
+ #
+ extras-build-config: config.libfoo.extra=true
+
+ # Test with system-installed libsqlite3.
+ #
+ system-builds: sys
+ system-build-config: ?sys:libsqlite3
+
+ # Enable Debian binary distribution generation and upload.
+ #
+ bindist-debian-builds: bindist
+ bindist-debian-build-include: linux_debian*-**
+ bindist-debian-build-include: linux_ubuntu*-**
+ bindist-debian-build-exclude: **
+ bindist-debian-build-config:
+ \
+ +bpkg.bindist.debian:
+ +bbot.bindist.upload:
+ \
+
+ See the `*-build-config` package manifest values in the manual for
+ details.
+
+ * New package-description and package-description-{file,type} package
+ manifest values.
+
+ Compared to the description* values, these can be used to provide a bpkg
+ package-specific description, such as the recommended usage, configuration
+ variables, etc. See the `description`, `package-description` package
+ manifest values in the manual for details.
+
+ * New changes-type package manifest value and type auto-detection, similar
+ to description.
+
+ See the `changes` package manifest value in the manual for details.
+
+ * New --deorphan pkg-build option.
+
+ This option can be used to replace orphaned packages (packages that no
+ longer have the corresponding package available in the repository it came
+ from) with the closest available package versions that satisfy all the
+ constraints.
+
+ * New --mask-repository* pkg-build options.
+
+ These options allow pretending for the duration of the pkg-build command
+ execution that the specified repository was removed as if by performing
+ the rep-remove command.
+
+ * New --dependent-exit pkg-drop option.
+
+ This option causes the pkg-drop command to silently exit with the
+ specified error code if attempting to drop dependent packages.
+
+ * New --git-capabilities common option to override auto-detected git
+ capabilities.
+
+ We now also assume the git repository protocol is smart if the HTTP
+ response code is 401 (requires authentication).
+
+ * curl is now used instead of wget as the default fetch program.
+
+ We used to prefer wget 1.16 because it has --show-progress which results
+ in nicer progress. But experience shows that wget is quite unreliable plus
+ with bdep always using curl, it would be strange to use both curl and wget
+ (and expecting the user to setup proxy, authentication, etc., for both).
+
+Version 0.15.0
+
+ * New dependency declaration features:
+
+ - Dependency groups, for example:
+
+ depends: { libboost-any libboost-log libboost-uuid } ~1.77.0
+
+
+ - Conditional dependencies, for example:
+
+ depends: libposix-getopt ^1.0.0 ? ($cxx.target.class == 'windows')
+
+
+ - Dependency alternatives, for example:
+
+ depends: libmysqlclient >= 5.0.3 | libmariadb ^10.2.2
+
+
+ - Reflected configuration variables, for example:
+
+ depends: libposix-getopt ^1.0.0 \
+ ? ($cxx.target.class == 'windows') \
+ config.hello.external_regex=true
+
+ Or:
+
+ depends: libmysqlclient >= 5.0.3 config.hello.db='mysql' | \
+ libmariadb ^10.2.2 config.hello.db='mariadb'
+
+
+ - Dependency configuration, for example:
+
+ depends:
+ \
+ libmariadb ^10.2.2
+ {
+ require
+ {
+ config.libmariadb.cache = true
+
+ if ($cxx.target.class != 'windows')
+ config.libmariadb.tls = true
+ }
+ }
+ \
+
+ Or:
+
+ depends:
+ \
+ libmariadb ^10.2.2
+ {
+ prefer
+ {
+ config.libmariadb.cache = true
+
+ config.libmariadb.buffer = ($config.libmariadb.buffer < 4096 \
+ ? 4096 \
+ : $config.libmariadb.buffer)
+ }
+
+ accept ($config.libmariadb.buffer >= 4096)
+ }
+ \
+
+ See the `depends` package manifest value in the manual for details.
+
+ The implementation of these features led to bpkg becoming a special build
+ system driver with repository metadata now containing the minimal subset
+ of build system files for each package (called the package build system
+ skeleton). See "Package Build System Skeleton" in the manual for details.
+
+ * Support for JSON output in the bpkg-pkg-status command.
+
+ See the --stdout-format option in bpkg-pkg-status(1) for details.
+
+ * New --all, --all-pattern bpkg-pkg-drop options.
+
+ These options can be used to drop all the held packages (in case of
+ --all-pattern, limited to those that match a wildcard pattern).
+
+ * New --keep-tmp common option.
+
+ This option instructs bpkg not to remove its temporary directory at the
+ end of the command execution and print its path if the verbosity level is
+ 2 or higher. This option is primarily useful for troubleshooting.
+
+Version 0.14.0
+
+ * Support for configuration types, configuration linking, and build-time
+ dependencies.
+
+ Configurations can now be linked with each other to allow a package to be
+ built in one configuration while its dependencies -- in one or more linked
+ configurations. This can be used to create a "base" configuration with
+ common dependencies that are shared between multiple configurations.
+
+ Configurations now also have types with the three predefined types being
+   `target` (the default), `host` (used for build-time dependencies), and
+ `build2` (used for build system modules). This mechanism together with
+ configuration linking is used to provide separate configurations for
+ build-time dependencies, for example, tools that need to be executed or
+ build system modules that need to be loaded during the build.
+
+ If during dependency resolution a build-time dependency is encountered and
+ there is no build configuration of a suitable type linked with the target
+ configuration, then a private configuration of the needed type is
+ automatically created and linked.
+
+ New relevant commands: bpkg-cfg-{link,unlink,info}.
+
+ Command with relevant changes (new options, etc): bpkg-cfg-create,
+ bpkg-pkg-{build,status}.
+
+ See bpkg-cfg-create(1) for details on this functionality.
+
+ * Tests, examples, and benchmark packages can now also be run-time or build-
+ time.
+
+ See the `tests`, `examples`, and `benchmarks` package manifest values for
+ details.
+
+ * Two new pre-defined automated build bot requirements: `bootstrap` (package
+ is a build system module that requires bootstrapping) and `host` (package
+ is normally used as build-time dependency and should be built in a host
+ configuration).
+
+ See the `requires` package manifest value for details.
+
+ * Configuration of an external package is now preserved between upgrades and
+ downgrades.
+
+ The same behavior for normal packages is still a TODO. The old behavior
+ can be achieved with the new --disfigure bpkg-pkg-build option that forces
+ a from-scratch reconfiguration.
+
+ * New `min-bpkg-version` repositories manifest value that allows specifying
+ the minimum supported bpkg version.
+
+ See also the new --min-bpkg-version bpkg-rep-create option.
+
+ * Change of the `build-email` package manifest value semantics.
+
+ Now build result notifications are only sent if this value is explicitly
+ specified (before we used to fallback to `email`).
+
+ * New --immediate|-i and --recursive|-r bpkg-pkg-{update,clean} options.
+
+ These options can be used to additionally update or clean immediate or all
+ dependencies of a package, respectively.
+
+ * New --all-pattern bpkg-pkg-{update,clean,test,install,uninstall} option.
+
+ This option can be used to perform the command on all the held packages
+ that match a wildcard pattern.
+
+ * New --rebuild-checksum bpkg-pkg-build option.
+
+ This option can be used to avoid rebuilds if the result of a dependency
+ resolution for a package has not changed.
+
+ * New --noop-exit bpkg-pkg-build option.
+
+ This option can be used to request a distinct exit code if the build is
+ a noop (performs no new package builds, upgrades, etc).
+
+ * New --output-{root,purge} bpkg-pkg-checkout and --checkout-{root,purge}
+ bpkg-pkg-build options.
+
+ * New --keep-config bpkg-pkg-disfigure option.
+
+ * New BPKG_DEF_OPT environment variable that can be used to suppress loading
+ of default options files.
+
Version 0.13.0
* The SPDX License Expression is now the default scheme for the 'license'
diff --git a/bpkg/archive.cxx b/bpkg/archive.cxx
index c096701..c41c4e3 100644
--- a/bpkg/archive.cxx
+++ b/bpkg/archive.cxx
@@ -31,22 +31,45 @@ namespace bpkg
}
#endif
+ // Only the extract ('x') and list ('t') operations are supported.
+ //
static pair<cstrings, size_t>
- start_extract (const common_options& co, const path& a)
+ start (const common_options& co, char op, const path& a)
{
+ assert (op == 'x' || op == 't');
+
cstrings args;
// On Windows we default to libarchive's bsdtar with auto-decompression
// (though there is also bsdcat which we could have used).
//
- const char* tar (co.tar_specified ()
- ? co.tar ().string ().c_str ()
+ // OpenBSD tar does not support -O|--to-stdout and so far the best
+ // solution seems to require bsdtar (libarchive) or gtar (GNU tar).
+ //
+ const char* tar;
+
+ if (co.tar_specified ())
+ tar = co.tar ().string ().c_str ();
+ else
+ {
#ifdef _WIN32
- : "bsdtar"
+ tar = "bsdtar";
+#elif defined(__OpenBSD__)
+ // A bit wasteful to do this every time (and throw away the result).
+ // Oh, well, the user can always "optimize" this away by passing
+ // explicit --tar.
+ //
+ if (!process::try_path_search ("bsdtar", true).empty ())
+ tar = "bsdtar";
+ else if (!process::try_path_search ("gtar", true).empty ())
+ tar = "gtar";
+ else
+ fail << "bsdtar or gtar required on OpenBSD for -O|--to-stdout support"
+ << endf;
#else
- : "tar"
+ tar = "tar";
#endif
- );
+ }
// See if we need to decompress.
//
@@ -91,7 +114,7 @@ namespace bpkg
args.push_back ("--force-local");
#endif
- args.push_back ("-xf");
+ args.push_back (op == 'x' ? "-xf" : "-tf");
args.push_back (i == 0 ? a.string ().c_str () : "-");
return make_pair (move (args), i);
@@ -100,7 +123,7 @@ namespace bpkg
pair<process, process>
start_extract (const common_options& co, const path& a, const dir_path& d)
{
- pair<cstrings, size_t> args_i (start_extract (co, a));
+ pair<cstrings, size_t> args_i (start (co, 'x', a));
cstrings& args (args_i.first);
size_t i (args_i.second);
@@ -171,31 +194,20 @@ namespace bpkg
}
}
- pair<process, process>
- start_extract (const common_options& co,
- const path& a,
- const path& f,
- bool diag)
+ // Only the extract ('x') and list ('t') operations are supported.
+ //
+ static pair<process, process>
+ start (const common_options& co,
+ char op,
+ const path& a,
+ const cstrings& tar_args,
+ bool diag)
{
- assert (!f.empty () && f.relative ());
-
- pair<cstrings, size_t> args_i (start_extract (co, a));
+ pair<cstrings, size_t> args_i (start (co, op, a));
cstrings& args (args_i.first);
size_t i (args_i.second);
- // -O/--to-stdout -- extract to stdout.
- //
- args.push_back ("-O");
-
- // On Windows neither MSYS GNU tar nor BSD tar will find the archived file
- // if its path is provided in the Windows notation.
- //
-#ifdef _WIN32
- string fs (f.posix_string ());
- args.push_back (fs.c_str ());
-#else
- args.push_back (f.string ().c_str ());
-#endif
+ args.insert (args.end (), tar_args.begin (), tar_args.end ());
args.push_back (nullptr);
args.push_back (nullptr); // Pipe end.
@@ -245,6 +257,34 @@ namespace bpkg
}
}
+ pair<process, process>
+ start_extract (const common_options& co,
+ const path& a,
+ const path& f,
+ bool diag)
+ {
+ assert (!f.empty () && f.relative ());
+
+ cstrings args;
+ args.reserve (2);
+
+ // -O/--to-stdout -- extract to stdout.
+ //
+ args.push_back ("-O");
+
+ // On Windows neither MSYS GNU tar nor BSD tar will find the archived file
+ // if its path is provided in the Windows notation.
+ //
+#ifdef _WIN32
+ string fs (f.posix_string ());
+ args.push_back (fs.c_str ());
+#else
+ args.push_back (f.string ().c_str ());
+#endif
+
+ return start (co, 'x', a, args, diag);
+ }
+
string
extract (const common_options& o, const path& a, const path& f, bool diag)
try
@@ -253,7 +293,7 @@ namespace bpkg
try
{
- // Do not throw when eofbit is set (end of stream reached), and
+ // Do not throw when eofbit is set (end of stream is reached), and
// when failbit is set (getline() failed to extract any character).
//
ifdstream is (move (pr.second.in_ofd), ifdstream::badbit);
@@ -290,4 +330,69 @@ namespace bpkg
//
fail << "unable to extract " << f << " from " << a << ": " << e << endf;
}
+
+ paths
+ archive_contents (const common_options& o, const path& a, bool diag)
+ try
+ {
+ pair<process, process> pr (start (o, 't', a, cstrings (), diag));
+
+ try
+ {
+ paths r;
+
+    // Do not throw when eofbit is set (end of stream is reached), and
+ // when failbit is set (getline() failed to extract any character).
+ //
+ ifdstream is (move (pr.second.in_ofd), ifdstream::badbit);
+
+ for (string l; !eof (getline (is, l)); )
+ r.emplace_back (move (l));
+
+ is.close ();
+
+ if (pr.second.wait () && pr.first.wait ())
+ return r;
+
+ // Fall through.
+ }
+ catch (const invalid_path& e)
+ {
+ // Just fall through if the pipeline has failed.
+ //
+ if (pr.second.wait () && pr.first.wait ())
+ {
+ if (diag)
+ error << "unable to obtain contents for " << a
+ << ": invalid path '" << e.path << "'";
+
+ throw failed ();
+ }
+
+ // Fall through.
+ }
+ catch (const io_error&)
+ {
+ // Child exit status doesn't matter. Just wait for the process
+ // completion and fall through.
+ //
+ pr.second.wait (); pr.first.wait (); // Check throw.
+ }
+
+  // While it is reasonable to assume the child process issued diagnostics
+ // if exited with an error status, tar, specifically, doesn't mention the
+ // archive name. So print the error message whatever the child exit status
+ // is, if the diagnostics is requested.
+ //
+ if (diag)
+ error << "unable to obtain contents for " << a;
+
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ // Note: this is not a tar error, so no diag check.
+ //
+ fail << "unable to obtain contents for " << a << ": " << e << endf;
+ }
}
diff --git a/bpkg/archive.hxx b/bpkg/archive.hxx
index cfc4cdb..44b3f01 100644
--- a/bpkg/archive.hxx
+++ b/bpkg/archive.hxx
@@ -45,6 +45,17 @@ namespace bpkg
const path& archive,
const path& file,
bool diag = true);
+
+ // Start the processes similar to the above functions but execute tar in the
+ // archive contents listing mode (-t) and then parse its stdout as a list of
+ // paths (one per line). If diag is false, then don't issue diagnostics
+ // about the reason why the contents can't be obtained (the archive is
+ // broken, etc).
+ //
+ paths
+ archive_contents (const common_options&,
+ const path& archive,
+ bool diag = true);
}
#endif // BPKG_ARCHIVE_HXX
diff --git a/bpkg/argument-grouping.cli b/bpkg/argument-grouping.cli
index c30a360..573f53d 100644
--- a/bpkg/argument-grouping.cli
+++ b/bpkg/argument-grouping.cli
@@ -7,7 +7,8 @@ include <bpkg/common-options.hxx>;
"\name=bpkg-argument-grouping"
"\summary=argument grouping facility"
-// NOTE: the grouping documentation was copied from CLI.
+// NOTE: the grouping documentation (indented part) was copied verbatim from
+// CLI's --generate-group-scanner documentation.
//
"
\h|SYNOPSIS|
@@ -20,48 +21,51 @@ For certain commands certain options and command line variables can be grouped
to only apply to specific arguments. This help topic describes the argument
grouping facility used for this purpose.
-Groups can be specified before (leading) and/or after (trailing) the argument
-they apply to. A leading group starts with '\cb{{}' and ends with '\cb{\}+}'
-while a trailing group starts with '\cb{+{}' and ends with '\cb{\}}'. For
-example:
-
-\
-{ --foo --bar }+ arg # 'arg' with '--foo' '--bar'
-arg +{ fox=1 baz=2 } # 'arg' with 'fox=1' 'baz=2'
-\
-
-Multiple leading and/or trailing groups can be specified for the same
-argument. For example:
-
-\
-{ -f }+ { -b }+ arg +{ f=1 } +{ b=2 } # 'arg' with '-f' 'b' 'f=1' 'b=2'
-\
-
-Note that the group applies to a single argument only. For example:
-
-\
-{ --foo }+ arg1 arg2 +{ --bar } # 'arg1' with '--foo' and
- # 'arg2' with '--bar'
-\
-
-The group separators ('\cb{{}', '\cb{\}+'}, etc) must be separate command line
-arguments. In particular, they must not be adjacent either to the arguments
-inside the group nor to the argument they apply to. All such cases will be
-treated as ordinary arguments. For example:
-
-\
-{--foo}+ arg # '{--foo}+' ...
-arg+{ --foo } # 'arg+{' ...
-\
-
-If one of the group separators needs to be specified as an argument verbatim,
-then it must be escaped with '\cb{\\}'. For example:
-
-\
-} # error: unexpected group separator
-}x # '}x'
-\} # '}'
-{ \}+ }+ arg # 'arg' with '}+'
-\
-
+ Groups can be specified before (leading) and/or after (trailing) the
+ argument they apply to. A leading group starts with '\cb{{}' and ends
+ with '\cb{\}+}' while a trailing group starts with '\cb{+{}' and ends
+ with '\cb{\}}'. For example:
+
+ \
+ { --foo --bar }+ arg # 'arg' with '--foo' '--bar'
+ arg +{ fox=1 baz=2 } # 'arg' with 'fox=1' 'baz=2'
+ \
+
+ Multiple leading and/or trailing groups can be specified for the
+ same argument. For example:
+
+ \
+ { -f }+ { -b }+ arg +{ f=1 } +{ b=2 } # 'arg' with '-f' 'b' 'f=1' 'b=2'
+ \
+
+ The group applies to a single argument only unless multiple arguments
+ are themselves grouped with '\cb{{}' and '\cb{\}}'. For example:
+
+ \
+ { --foo }+ arg1 arg2 +{ --bar } # 'arg1' with '--foo'
+ # 'arg2' with '--bar'
+
+ { --foo }+ { arg1 arg2 } +{ --bar } # 'arg1' with '--foo' '--bar'
+ # 'arg2' with '--foo' '--bar'
+ \
+
+ The group separators ('\cb{{}', '\cb{\}+'}, etc) must be separate command
+ line arguments. In particular, they must not be adjacent either to the
+ arguments inside the group nor to the argument they apply to. All such
+ cases will be treated as ordinary arguments. For example:
+
+ \
+ {--foo}+ arg # '{--foo}+' ...
+ arg+{ --foo } # 'arg+{' ...
+ \
+
+ If one of the group separators needs to be specified as an argument
+ verbatim, then it must be escaped with '\cb{\\}'. For example:
+
+ \
+ } # error: unexpected group separator
+ }x # '}x'
+ \} # '}'
+ { \}+ }+ arg # 'arg' with '}+'
+ \
"
diff --git a/bpkg/auth.cxx b/bpkg/auth.cxx
index 0693abc..663054d 100644
--- a/bpkg/auth.cxx
+++ b/bpkg/auth.cxx
@@ -7,11 +7,11 @@
#include <limits> // numeric_limits
#include <iterator> // ostreambuf_iterator
-#include <libbutl/sha256.mxx>
-#include <libbutl/base64.mxx>
-#include <libbutl/openssl.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
+#include <libbutl/base64.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/semantic-version.hxx>
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
@@ -23,10 +23,14 @@ using namespace butl;
namespace bpkg
{
- static const string openssl_rsautl ("rsautl");
- static const string openssl_x509 ("x509");
-
- const char* openssl_commands[3] = {openssl_rsautl.c_str (),
+ static const string openssl_version ("version");
+ static const string openssl_pkeyutl ("pkeyutl");
+ static const string openssl_rsautl ("rsautl");
+ static const string openssl_x509 ("x509");
+
+ const char* openssl_commands[5] = {openssl_version.c_str (),
+ openssl_pkeyutl.c_str (),
+ openssl_rsautl.c_str (),
openssl_x509.c_str (),
nullptr};
@@ -39,6 +43,49 @@ namespace bpkg
print_process (args, n);
}
+ // Return true if the openssl version is greater or equal to 3.0.0 and so
+ // pkeyutl needs to be used instead of rsautl. Cache the result on the first
+ // function call.
+ //
+ // Note that openssl 3.0.0 deprecates rsautl in favor of pkeyutl.
+ //
+ // Also note that pkeyutl is only implemented in openssl version 1.0.0 and
+ // its -verifyrecover mode is broken in the [1.1.1 1.1.1d] version range
+ // (see the 'pkeyutl -verifyrecover error "input data too long to be a
+ // hash"' issue report for details).
+ //
+ static optional<bool> use_pkeyutl;
+
+ static bool
+ use_openssl_pkeyutl (const common_options& co)
+ {
+ if (!use_pkeyutl)
+ {
+ const path& openssl_path (co.openssl ()[openssl_version]);
+
+ try
+ {
+ optional<openssl_info> oi (
+ openssl::info (print_command, 2, openssl_path));
+
+ use_pkeyutl = oi &&
+ oi->name == "OpenSSL" &&
+ oi->version >= semantic_version {3, 0, 0};
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute " << openssl_path << ": " << e << endf;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read '" << openssl_path << "' output: " << e
+ << endf;
+ }
+ }
+
+ return *use_pkeyutl;
+ }
+
// Find the repository location prefix that ends with the version component.
// We consider all repositories under this location to be related.
//
@@ -160,21 +207,32 @@ namespace bpkg
getline (os.in, s);
os.in.close ();
- try
+ if (os.wait ())
{
- const size_t n (19);
-
- if (os.wait () &&
- s.size () > n && s.compare (0, n, "SHA256 Fingerprint=") == 0)
+ // Normally the output is:
+ //
+ // SHA256 Fingerprint=<fingerprint>
+ //
+ // But it can be translated and SHA spelled in lower case (LC_ALL=C
+ // doesn't seem to help in some cases).
+ //
+ if (icasecmp (s, "SHA256", 6) == 0)
{
- string fp (s, n);
- string ab (fingerprint_to_sha256 (fp, 16));
- return {move (fp), move (ab)};
+ size_t p (s.find ('='));
+ if (p != string::npos)
+ {
+ try
+ {
+ string fp (s, p + 1);
+ string ab (fingerprint_to_sha256 (fp, 16));
+ return {move (fp), move (ab)};
+ }
+ catch (const invalid_argument&)
+ {
+ }
+ }
}
}
- catch (const invalid_argument&)
- {
- }
calc_failed ();
@@ -561,7 +619,6 @@ namespace bpkg
//
static shared_ptr<certificate>
auth_cert (const common_options& co,
- const dir_path& conf,
database& db,
const optional<string>& pem,
const repository_location& rl,
@@ -603,7 +660,7 @@ namespace bpkg
//
if (pem)
{
- path f (conf / certs_dir / path (cert->id + ".pem"));
+ path f (db.config_orig / certs_dir / path (cert->id + ".pem"));
try
{
@@ -624,6 +681,7 @@ namespace bpkg
shared_ptr<const certificate>
authenticate_certificate (const common_options& co,
const dir_path* conf,
+ database* db,
const optional<string>& pem,
const repository_location& rl,
const optional<string>& dependent_trust)
@@ -633,15 +691,12 @@ namespace bpkg
if (co.trust_no () && co.trust_yes ())
fail << "--trust-yes and --trust-no are mutually exclusive";
- if (conf != nullptr && conf->empty ())
- conf = exists (bpkg_dir) ? &current_dir : nullptr;
-
- assert (conf == nullptr || !conf->empty ());
-
shared_ptr<certificate> r;
if (conf == nullptr)
{
+ assert (db == nullptr);
+
// If we have no configuration, go straight to authenticating a new
// certificate.
//
@@ -650,20 +705,21 @@ namespace bpkg
? auth_real (co, fp, *pem, rl, dependent_trust).cert
: auth_dummy (co, fp.abbreviated, rl);
}
- else if (transaction::has_current ())
+ else if (db != nullptr)
{
+ assert (transaction::has_current ());
+
r = auth_cert (co,
- *conf,
- transaction::current ().database (),
+ *db,
pem,
rl,
dependent_trust);
}
else
{
- database db (open (*conf, trace));
+ database db (*conf, trace, false /* pre_attach */);
transaction t (db);
- r = auth_cert (co, *conf, db, pem, rl, dependent_trust);
+ r = auth_cert (co, db, pem, rl, dependent_trust);
t.commit ();
}
@@ -680,11 +736,6 @@ namespace bpkg
{
tracer trace ("authenticate_repository");
- if (conf != nullptr && conf->empty ())
- conf = exists (bpkg_dir) ? &current_dir : nullptr;
-
- assert (conf == nullptr || !conf->empty ());
-
path f;
auto_rmfile rm;
@@ -699,7 +750,7 @@ namespace bpkg
try
{
- rm = tmp_file ("cert");
+ rm = tmp_file (conf != nullptr ? *conf : empty_dir_path, "cert");
f = rm.path;
ofdstream ofs (f);
@@ -825,15 +876,22 @@ namespace bpkg
dr << ": " << *e;
};
- const path& openssl_path (co.openssl ()[openssl_rsautl]);
- const strings& openssl_opts (co.openssl_option ()[openssl_rsautl]);
+ bool ku (use_openssl_pkeyutl (co));
+ const string& cmd (ku ? openssl_pkeyutl : openssl_rsautl);
+
+ const path& openssl_path (co.openssl ()[cmd]);
+ const strings& openssl_opts (co.openssl_option ()[cmd]);
try
{
openssl os (print_command,
path ("-"), fdstream_mode::text, 2,
- openssl_path, openssl_rsautl,
- openssl_opts, "-verify", "-certin", "-inkey", f);
+ openssl_path, cmd,
+ openssl_opts,
+ ku ? "-verifyrecover" : "-verify",
+ "-certin",
+ "-inkey",
+ f);
for (const auto& c: sm.signature)
os.out.put (c); // Sets badbit on failure.
@@ -914,14 +972,18 @@ namespace bpkg
dr << ": " << *e;
};
- const path& openssl_path (co.openssl ()[openssl_rsautl]);
- const strings& openssl_opts (co.openssl_option ()[openssl_rsautl]);
+ const string& cmd (use_openssl_pkeyutl (co)
+ ? openssl_pkeyutl
+ : openssl_rsautl);
+
+ const path& openssl_path (co.openssl ()[cmd]);
+ const strings& openssl_opts (co.openssl_option ()[cmd]);
try
{
openssl os (print_command,
fdstream_mode::text, path ("-"), 2,
- openssl_path, openssl_rsautl,
+ openssl_path, cmd,
openssl_opts, "-sign", "-inkey", key_name);
os.out << sha256sum;
diff --git a/bpkg/auth.hxx b/bpkg/auth.hxx
index b5ae1ff..54e6884 100644
--- a/bpkg/auth.hxx
+++ b/bpkg/auth.hxx
@@ -15,20 +15,18 @@
namespace bpkg
{
// Authenticate a repository certificate. If the configuration directory is
- // NULL, then perform without a certificate database. If it is empty, then
- // check if the current working directory is a configuration. If it is, then
- // use its certificate database. Otherwise, continue as if it was NULL. All
- // other values (including '.') are assumed to be valid configuration paths
- // and will be diagnosed if that's not the case.
+ // NULL, then perform without a certificate database. Otherwise, use its
+ // certificate database.
//
// If the dependent trust fingerprint is present then try to authenticate
// the certificate for use by the dependent prior to prompting the user.
// Note that if certificate is authenticated for such a use, then it is not
// persisted into the database.
//
- // If the configuration is used, then check if we are already in transaction.
- // If so, then assume the configuration database is already opened and use
- // that. Otherwise, open the database and start a new transaction.
+ // If the configuration is used and also the configuration database is
+ // specified, then assume the database is already opened with the
+ // transaction started and use that. Otherwise, open the database and start
+ // a new transaction.
//
// Note that one drawback of doing this as part of an existing transaction
// is that if things go south and the transaction gets aborted, then all the
@@ -38,6 +36,7 @@ namespace bpkg
shared_ptr<const certificate>
authenticate_certificate (const common_options&,
const dir_path* configuration,
+ database*,
const optional<string>& cert_pem,
const repository_location&,
const optional<string>& dependent_trust);
@@ -80,15 +79,11 @@ namespace bpkg
// openssl x509 -noout -modulus -in cert.pem
// openssl rsa -noout -modulus -in key.pem
//
- // But taking into account that we need to be able to use custom engines to
- // access keys, it seems to be impossible to provide the same additional
- // openssl options to fit both the rsa and pkeyutl commands. The first would
- // require "-engine pkcs11 -inform engine", while the second -- "-engine
- // pkcs11 -keyform engine". Also it would require to enter the key password
- // again, which is a showstopper. Maybe the easiest would be to recover the
- // sum back from the signature using the certificate, and compare it with
- // the original sum (like we do in authenticate_repository()). But that
- // would require to temporarily save the certificate to file.
+ // However, it would require to enter the key password again, which is a
+ // showstopper. Maybe the easiest would be to recover the sum back from the
+ // signature using the certificate, and compare it with the original sum
+ // (like we do in authenticate_repository()). But that would require to
+ // temporarily save the certificate to file.
//
std::vector<char>
sign_repository (const common_options&,
diff --git a/bpkg/bpkg.cli b/bpkg/bpkg.cli
index 48f655e..6edea97 100644
--- a/bpkg/bpkg.cli
+++ b/bpkg/bpkg.cli
@@ -50,6 +50,10 @@ namespace bpkg
configuration is an amalgamation that contains packages as subprojects
(see \l{bpkg-cfg-create(1)} for details).
+ Build configurations can be linked with each other so that while a
+ package is built in one configuration, some of its dependencies can be
+ built in linked configurations (see \l{bpkg-cfg-create(1)} for details).
+
A \i{bpkg package} is an archive or directory (potentially in a version
control system) that contains a \cb{build2} project plus the package
\cb{manifest} file. \cb{bpkg} can either use package archives/directories
@@ -168,6 +172,21 @@ namespace bpkg
"\l{bpkg-cfg-create(1)} \- create configuration"
}
+ bool cfg-info
+ {
+ "\l{bpkg-cfg-info(1)} \- print configuration information"
+ }
+
+ bool cfg-link|link
+ {
+ "\l{bpkg-cfg-link(1)} \- link configuration"
+ }
+
+ bool cfg-unlink|unlink
+ {
+ "\l{bpkg-cfg-unlink(1)} \- unlink configuration"
+ }
+
bool rep-info
{
"\l{bpkg-rep-info(1)} \- print repository information"
@@ -238,6 +257,11 @@ namespace bpkg
"\l{bpkg-pkg-clean(1)} \- clean package"
}
+ bool pkg-bindist|bindist
+ {
+ "\l{bpkg-pkg-bindist(1)} \- generate binary distribution package"
+ }
+
bool pkg-verify
{
"\l{bpkg-pkg-verify(1)} \- verify package archive"
@@ -314,9 +338,10 @@ namespace bpkg
"\h|ENVIRONMENT|
- Commands executed by \cb{bpkg} while the build configuration database is
- open will have the \cb{BPKG_OPEN_CONFIG} environment variable set to the
- absolute and normalized configuration directory path. This can be used by
+ Commands executed by \cb{bpkg} while the current and linked build
+ configuration databases are open will have the \cb{BPKG_OPEN_CONFIGS}
+ environment variable set to the space-separated, \cb{\"}-quoted list of
+ absolute and normalized configuration directory paths. This can be used by
build system hooks and/or programs that they execute.
"
diff --git a/bpkg/bpkg.cxx b/bpkg/bpkg.cxx
index f1ee302..b5eaf7d 100644
--- a/bpkg/bpkg.cxx
+++ b/bpkg/bpkg.cxx
@@ -1,15 +1,40 @@
// file : bpkg/bpkg.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef _WIN32
-# include <signal.h> // signal()
-#endif
+#include <bpkg/bpkg.hxx>
+#include <limits>
+#include <cstdlib> // getenv()
+#include <cstring> // strcmp()
#include <iostream>
#include <exception> // set_terminate(), terminate_handler
#include <type_traits> // enable_if, is_base_of
-#include <libbutl/backtrace.mxx> // backtrace()
+#include <libbutl/backtrace.hxx> // backtrace()
+
+// Embedded build system driver.
+//
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+#include <libbuild2/module.hxx>
+
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/b-cmdline.hxx>
+
+#include <libbuild2/dist/init.hxx>
+#include <libbuild2/test/init.hxx>
+#include <libbuild2/config/init.hxx>
+#include <libbuild2/install/init.hxx>
+
+#include <libbuild2/in/init.hxx>
+#include <libbuild2/bin/init.hxx>
+#include <libbuild2/c/init.hxx>
+#include <libbuild2/cc/init.hxx>
+#include <libbuild2/cxx/init.hxx>
+#include <libbuild2/version/init.hxx>
+
+#include <libbuild2/bash/init.hxx>
+#include <libbuild2/cli/init.hxx>
#include <bpkg/types.hxx>
#include <bpkg/utility.hxx>
@@ -22,7 +47,11 @@
#include <bpkg/help.hxx>
#include <bpkg/cfg-create.hxx>
+#include <bpkg/cfg-info.hxx>
+#include <bpkg/cfg-link.hxx>
+#include <bpkg/cfg-unlink.hxx>
+#include <bpkg/pkg-bindist.hxx>
#include <bpkg/pkg-build.hxx>
#include <bpkg/pkg-checkout.hxx>
#include <bpkg/pkg-clean.hxx>
@@ -52,6 +81,164 @@ using namespace bpkg;
namespace bpkg
{
+ // Print backtrace if terminating due to an unhandled exception. Note that
+ // custom_terminate is non-static and not a lambda to reduce the noise.
+ //
+ static terminate_handler default_terminate;
+
+ void
+ custom_terminate ()
+ {
+ *diag_stream << backtrace ();
+
+ if (default_terminate != nullptr)
+ default_terminate ();
+ }
+
+ static void
+ build2_terminate (bool trace)
+ {
+ if (!trace)
+ set_terminate (default_terminate);
+
+ std::terminate ();
+ }
+
+ strings build2_cmd_vars;
+ build2::scheduler build2_sched;
+ build2::global_mutexes build2_mutexes;
+ build2::file_cache build2_fcache;
+
+ static const char* build2_argv0;
+
+ void
+ build2_init (const common_options& co)
+ {
+ try
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::endf;
+
+ build2::tracer trace ("build2_init");
+
+ // Parse --build-option values as the build2 driver command line.
+ //
+ // With things like verbosity, progress, etc., we use values from
+ // --build-option if specified, falling back to equivalent bpkg values
+ // otherwise.
+ //
+ b_options bo;
+ b_cmdline bc;
+ {
+ small_vector<char*, 1> argv {const_cast<char*> (build2_argv0)};
+
+ if (size_t n = co.build_option ().size ())
+ {
+ argv.reserve (n + 1);
+
+ for (const string& a: co.build_option ())
+ argv.push_back (const_cast<char*> (a.c_str ()));
+ }
+
+ // Note that this function also parses the default options files and
+ // gets/sets the relevant environment variables.
+ //
+ // For now we use the same default verbosity as us (equivalent to
+ // start_b() with verb_b::normal).
+ //
+ bc = parse_b_cmdline (trace,
+ static_cast<int> (argv.size ()), argv.data (),
+ bo,
+ bpkg::verb,
+ co.jobs_specified () ? co.jobs () : 0);
+
+ if (!bc.buildspec.empty ())
+ fail << "argument specified with --build-option";
+
+ if (bo.help () || bo.version ())
+ fail << "--help or --version specified with --build-option";
+
+ // Make sure someone didn't specify a non-global override with
+ // --build-option, which messes our global/package-specific config
+ // variable split.
+ //
+ for (const string& v: bc.cmd_vars)
+ {
+ if (v[0] != '!')
+ fail << "non-global configuration variable '" << v
+ << "' specified with --build-option";
+ }
+ }
+
+ build2_cmd_vars = move (bc.cmd_vars);
+
+ init_diag (bc.verbosity,
+ bo.silent (),
+ (bc.progress ? bc.progress :
+ co.progress () ? optional<bool> (true) :
+ co.no_progress () ? optional<bool> (false) : nullopt),
+ (bc.diag_color ? bc.diag_color :
+ co.diag_color () ? optional<bool> (true) :
+ co.no_diag_color () ? optional<bool> (false) : nullopt),
+ bo.no_line (),
+ bo.no_column (),
+ bpkg::stderr_term.has_value ());
+
+ // Also note that we now use this in pkg_configure(), but serial-stop
+ // is good for it as well.
+ //
+ init (&build2_terminate,
+ build2_argv0,
+ false /* serial_stop */,
+ bc.mtime_check,
+ bc.config_sub,
+ bc.config_guess);
+
+ load_builtin_module (&build2::config::build2_config_load);
+ load_builtin_module (&build2::dist::build2_dist_load);
+ load_builtin_module (&build2::test::build2_test_load);
+ load_builtin_module (&build2::install::build2_install_load);
+
+ load_builtin_module (&build2::bin::build2_bin_load);
+ load_builtin_module (&build2::cc::build2_cc_load);
+ load_builtin_module (&build2::c::build2_c_load);
+ load_builtin_module (&build2::cxx::build2_cxx_load);
+ load_builtin_module (&build2::version::build2_version_load);
+ load_builtin_module (&build2::in::build2_in_load);
+
+ load_builtin_module (&build2::bash::build2_bash_load);
+ load_builtin_module (&build2::cli::build2_cli_load);
+
+ // Note that while all we need is serial execution (all we do is load),
+ // in the process we may need to update some build system modules (while
+ // we only support built-in and standard pre-installed modules here, we
+ // may need to build the latter during development). At the same time,
+ // this is an unlikely case and starting a parallel scheduler is not
+ // cheap. So what we will do is start a parallel scheduler pre-tuned to
+ // serial execution, which is relatively cheap. The module building
+ // logic will then re-tune it to parallel if and when necessary.
+ //
+ // Note that we now also use this in pkg_configure() where we re-tune
+ // the scheduler (it may already have been initialized as part of the
+ // package skeleton work).
+ //
+ build2_sched.startup (1 /* max_active */,
+ 1 /* init_active */,
+ bc.max_jobs,
+ bc.jobs * bo.queue_depth (),
+ bc.max_stack,
+ bc.jobs);
+
+ build2_mutexes.init (build2_sched.shard_size ());
+ build2_fcache.init (bc.fcache_compress);
+ }
+ catch (const build2::failed&)
+ {
+ throw bpkg::failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
// Deduce the default options files and the directory to start searching
// from based on the command line options and arguments.
//
@@ -141,6 +328,17 @@ namespace bpkg
main (int argc, char* argv[]);
}
+// Note that pkg-build command supports multiple configurations and
+// initializes multiple temporary directories itself. This function is,
+// however, required since pkg_build_options::directory() returns a vector and
+// the below template function cannot be used.
+//
+static inline const dir_path&
+cfg_dir (const pkg_build_options*)
+{
+ return empty_dir_path;
+}
+
// Get -d|--directory value if the option class O has it and empty path
// otherwise. Note that for some commands (like rep-info) that allow
// specifying empty path, the returned value is a string, not a dir_path.
@@ -152,6 +350,20 @@ cfg_dir (const O* o) -> decltype(o->directory ()) {return o->directory ();}
static inline auto
cfg_dir (...) -> const dir_path& {return empty_dir_path;}
+// Command line arguments starting position.
+//
+// We want the positions of the command line arguments to be after the default
+// options files (parsed in init()). Normally that would be achieved by
+// passing the last position of the previous scanner to the next. The problem
+// is that we parse the command line arguments first (for good reasons). Also
+// the default options files parsing machinery needs the maximum number of
+// arguments to be specified and assigns the positions below this value (see
+// load_default_options() for details). So we are going to "reserve" the first
+// half of the size_t value range for the default options positions and the
+// second half for the command line arguments positions.
+//
+static const size_t args_pos (numeric_limits<size_t>::max () / 2);
+
// Initialize the command option class O with the common options and then
// parse the rest of the command line placing non-option arguments to args.
// Once this is done, use the "final" values of the common options to do
@@ -161,7 +373,7 @@ template <typename O>
static O
init (const common_options& co,
cli::group_scanner& scan,
- strings& args,
+ strings& args, cli::vector_scanner& args_scan,
const char* cmd,
bool keep_sep,
bool tmp)
@@ -181,6 +393,11 @@ init (const common_options& co,
{
if (opt)
{
+ // Parse the next chunk of options until we reach an argument (or eos).
+ //
+ if (o.parse (scan) && !scan.more ())
+ break;
+
// If we see first "--", then we are done parsing options.
//
if (strcmp (scan.peek (), "--") == 0)
@@ -192,11 +409,6 @@ init (const common_options& co,
continue;
}
- // Parse the next chunk of options until we reach an argument (or eos).
- //
- if (o.parse (scan))
- continue;
-
// Fall through.
}
@@ -217,6 +429,11 @@ init (const common_options& co,
}
}
+ // Carry over the positions of the arguments. In particular, this can be
+ // used to get the max position for the options.
+ //
+ args_scan.reset (0, scan.position ());
+
// Note that the diagnostics verbosity level can only be calculated after
// default options are loaded and merged (see below). Thus, to trace the
// default options files search, we refer to the verbosity level specified
@@ -247,9 +464,26 @@ init (const common_options& co,
{
optional<dir_path> extra;
if (o.default_options_specified ())
+ {
extra = o.default_options ();
- o = merge_options (
+ // Note that load_default_options() expects absolute and normalized
+ // directory.
+ //
+ try
+ {
+ if (extra->relative ())
+ extra->complete ();
+
+ extra->normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid --default-options value " << e.path;
+ }
+ }
+
+ default_options<O> dos (
load_default_options<O, cli::argv_file_scanner, cli::unknown_mode> (
nullopt /* sys_dir */,
path::home_directory (),
@@ -265,8 +499,72 @@ init (const common_options& co,
trace << "loading " << (r ? "remote " : "local ") << f;
}
},
- "--options-file"),
- o);
+ "--options-file",
+ args_pos,
+ 1024));
+
+ // Verify common options.
+ //
+ // Also merge the --*/--no-* options, overriding a less specific flag with
+ // a more specific.
+ //
+ //
+ optional<bool> progress, diag_color;
+ auto merge_no = [&progress, &diag_color] (
+ const O& o,
+ const default_options_entry<O>* e = nullptr)
+ {
+ {
+ if (o.progress () && o.no_progress ())
+ {
+ diag_record dr;
+ (e != nullptr ? dr << fail (e->file) : dr << fail)
+ << "both --progress and --no-progress specified";
+ }
+
+ if (o.progress ())
+ progress = true;
+ else if (o.no_progress ())
+ progress = false;
+ }
+
+ {
+ if (o.diag_color () && o.no_diag_color ())
+ {
+ diag_record dr;
+ (e != nullptr ? dr << fail (e->file) : dr << fail)
+ << "both --diag-color and --no-diag-color specified";
+ }
+
+ if (o.diag_color ())
+ diag_color = true;
+ else if (o.no_diag_color ())
+ diag_color = false;
+ }
+ };
+
+ for (const default_options_entry<O>& e: dos)
+ merge_no (e.options, &e);
+
+ merge_no (o);
+
+ o = merge_options (dos, o);
+
+ if (progress)
+ {
+ o.progress (*progress);
+ o.no_progress (!*progress);
+ }
+
+ if (diag_color)
+ {
+ o.diag_color (*diag_color);
+ o.no_diag_color (!*diag_color);
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "unable to load default options files: " << e;
}
catch (const pair<path, system_error>& e)
{
@@ -296,21 +594,9 @@ init (const common_options& co,
if (tmp)
init_tmp (dir_path (cfg_dir (&o)));
- return o;
-}
+ keep_tmp = o.keep_tmp ();
-// Print backtrace if terminating due to an unhandled exception. Note that
-// custom_terminate is non-static and not a lambda to reduce the noise.
-//
-static terminate_handler default_terminate;
-
-void
-custom_terminate ()
-{
- *diag_stream << backtrace ();
-
- if (default_terminate != nullptr)
- default_terminate ();
+ return o;
}
int bpkg::
@@ -321,45 +607,33 @@ try
default_terminate = set_terminate (custom_terminate);
- stderr_term = fdterm (stderr_fd ());
- exec_dir = path (argv[0]).directory ();
-
- // This is a little hack to make our baseutils for Windows work when called
- // with absolute path. In a nutshell, MSYS2's exec*p() doesn't search in the
- // parent's executable directory, only in PATH. And since we are running
- // without a shell (that would read /etc/profile which sets PATH to some
- // sensible values), we are only getting Win32 PATH values. And MSYS2 /bin
- // is not one of them. So what we are going to do is add /bin at the end of
- // PATH (which will be passed as is by the MSYS2 machinery). This will make
- // MSYS2 search in /bin (where our baseutils live). And for everyone else
- // this should be harmless since it is not a valid Win32 path.
- //
-#ifdef _WIN32
+ if (fdterm (stderr_fd ()))
{
- string mp;
- if (optional<string> p = getenv ("PATH"))
- {
- mp = move (*p);
- mp += ';';
- }
- mp += "/bin";
+ stderr_term = std::getenv ("TERM");
- setenv ("PATH", mp);
- }
+ stderr_term_color =
+#ifdef _WIN32
+ // For now we disable color on Windows since it's unclear if/where/how
+ // it is supported. Maybe one day someone will figure this out.
+ //
+ false
+#else
+ // This test was lifted from GCC (Emacs shell sets TERM=dumb).
+ //
+ *stderr_term != nullptr && strcmp (*stderr_term, "dumb") != 0
#endif
+ ;
+ }
+
+ exec_dir = path (argv[0]).directory ();
+ build2_argv0 = argv[0];
- // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if
- // the pipe reading end is closed. Note that by default this signal
- // terminates a process. Also note that there is no way to disable this
- // behavior on a file descriptor basis or for the write() function call.
+ // Note that this call sets PATH to include our baseutils /bin on Windows
+ // and ignores SIGPIPE.
//
-#ifndef _WIN32
- if (signal (SIGPIPE, SIG_IGN) == SIG_ERR)
- fail << "unable to ignore broken pipe (SIGPIPE) signal: "
- << system_error (errno, generic_category ()); // Sanitize.
-#endif
+ build2::init_process ();
- argv_file_scanner argv_scan (argc, argv, "--options-file");
+ argv_file_scanner argv_scan (argc, argv, "--options-file", false, args_pos);
group_scanner scan (argv_scan);
// First parse common options and --version/--help.
@@ -372,21 +646,22 @@ try
cout << "bpkg " << BPKG_VERSION_ID << endl
<< "libbpkg " << LIBBPKG_VERSION_ID << endl
<< "libbutl " << LIBBUTL_VERSION_ID << endl
+ << "host " << host_triplet << endl
<< "Copyright (c) " << BPKG_COPYRIGHT << "." << endl
<< "This is free software released under the MIT license." << endl;
return 0;
}
- strings argsv; // To be filled by parse() above.
- vector_scanner vect_args (argsv);
- group_scanner args (vect_args);
+ strings argsv; // To be filled by init() above.
+ vector_scanner scanv (argsv);
+ group_scanner args (scanv);
const common_options& co (o);
if (o.help ())
return help (init<help_options> (co,
scan,
- argsv,
+ argsv, scanv,
"help",
false /* keep_sep */,
false /* tmp */),
@@ -419,7 +694,7 @@ try
{
ho = init<help_options> (co,
scan,
- argsv,
+ argsv, scanv,
"help",
false /* keep_sep */,
false /* tmp */);
@@ -468,6 +743,7 @@ try
// r = pkg_verify (init<pkg_verify_options> (co,
// scan,
// argsv,
+ // scanv,
// "pkg-verify",
// false /* keep_sep */,
// true /* tmp */),
@@ -485,6 +761,7 @@ try
r = NP##CMD (init<NP##CMD##_options> (co, \
scan, \
argsv, \
+ scanv, \
SP#CMD, \
SEP, \
TMP), \
@@ -498,58 +775,73 @@ try
#define CFG_COMMAND(CMD, TMP) COMMAND_IMPL(cfg_, "cfg-", CMD, false, TMP)
CFG_COMMAND (create, false); // Temp dir initialized manually.
+ CFG_COMMAND (info, true);
+ CFG_COMMAND (link, true);
+ CFG_COMMAND (unlink, true);
// pkg-* commands
//
-#define PKG_COMMAND(CMD, SEP) COMMAND_IMPL(pkg_, "pkg-", CMD, SEP, true)
+#define PKG_COMMAND(CMD, SEP, TMP) COMMAND_IMPL(pkg_, "pkg-", CMD, SEP, TMP)
// These commands need the '--' separator to be kept in args.
//
- PKG_COMMAND (build, true);
- PKG_COMMAND (clean, true);
- PKG_COMMAND (configure, true);
- PKG_COMMAND (install, true);
- PKG_COMMAND (test, true);
- PKG_COMMAND (uninstall, true);
- PKG_COMMAND (update, true);
-
- PKG_COMMAND (checkout, false);
- PKG_COMMAND (disfigure, false);
- PKG_COMMAND (drop, false);
- PKG_COMMAND (fetch, false);
- PKG_COMMAND (purge, false);
- PKG_COMMAND (status, false);
- PKG_COMMAND (unpack, false);
- PKG_COMMAND (verify, false);
+ PKG_COMMAND (bindist, true, true);
+ PKG_COMMAND (build, true, false);
+ PKG_COMMAND (clean, true, true);
+ PKG_COMMAND (configure, true, true);
+ PKG_COMMAND (install, true, true);
+ PKG_COMMAND (test, true, true);
+ PKG_COMMAND (uninstall, true, true);
+ PKG_COMMAND (update, true, true);
+
+ PKG_COMMAND (checkout, false, true);
+ PKG_COMMAND (disfigure, false, true);
+ PKG_COMMAND (drop, false, true);
+ PKG_COMMAND (fetch, false, true);
+ PKG_COMMAND (purge, false, true);
+ PKG_COMMAND (status, false, true);
+ PKG_COMMAND (unpack, false, true);
+ PKG_COMMAND (verify, false, true);
// rep-* commands
//
-#define REP_COMMAND(CMD) COMMAND_IMPL(rep_, "rep-", CMD, false, true)
+#define REP_COMMAND(CMD, TMP) COMMAND_IMPL(rep_, "rep-", CMD, false, TMP)
- REP_COMMAND (add);
- REP_COMMAND (create);
- REP_COMMAND (fetch);
- REP_COMMAND (info);
- REP_COMMAND (list);
- REP_COMMAND (remove);
+ REP_COMMAND (add, true);
+ REP_COMMAND (create, true);
+ REP_COMMAND (fetch, true);
+ REP_COMMAND (info, false);
+ REP_COMMAND (list, true);
+ REP_COMMAND (remove, true);
assert (false);
fail << "unhandled command";
}
- // Derived from failed and so needs to be caught first.
- //
- catch (const recoverable&)
+ catch (const failed& e)
{
- r = 2;
+ r = e.code;
break;
}
- catch (const failed&)
+
+ // Shutdown the build2 scheduler if it was initialized.
+ //
+ if (build2_sched.started ())
+ build2_sched.shutdown ();
+
+ if (!keep_tmp)
{
- r = 1;
- break;
+ clean_tmp (true /* ignore_error */);
}
+ else if (verb > 1)
+ {
+ for (const auto& d: tmp_dirs)
+ {
+ const dir_path& td (d.second);
- clean_tmp (true /* ignore_error */);
+ if (exists (td))
+ info << "keeping temporary directory " << td;
+ }
+ }
if (r != 0)
return r;
@@ -567,22 +859,22 @@ try
return 0;
}
-catch (const failed&)
+catch (const failed& e)
{
- return 1; // Diagnostics has already been issued.
+ return e.code; // Diagnostics has already been issued.
}
catch (const cli::exception& e)
{
error << e;
return 1;
}
-/*
+#if 0
catch (const std::exception& e)
{
error << e;
return 1;
}
-*/
+#endif
int
main (int argc, char* argv[])
diff --git a/bpkg/bpkg.hxx b/bpkg/bpkg.hxx
new file mode 100644
index 0000000..1ebbf85
--- /dev/null
+++ b/bpkg/bpkg.hxx
@@ -0,0 +1,35 @@
+// file : bpkg/bpkg.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_BPKG_HXX
+#define BPKG_BPKG_HXX
+
+// Embedded build system driver.
+//
+#include <libbuild2/context.hxx>
+#include <libbuild2/scheduler.hxx>
+#include <libbuild2/file-cache.hxx>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/common-options.hxx>
+
+namespace bpkg
+{
+ // These are initialized by build2_init().
+ //
+ extern strings build2_cmd_vars;
+ extern build2::scheduler build2_sched;
+ extern build2::global_mutexes build2_mutexes;
+ extern build2::file_cache build2_fcache;
+
+ // Use build2_sched.started() to check if already initialized. Note that the
+ // scheduler is pre-tuned for serial execution.
+ //
+ //
+ void
+ build2_init (const common_options&);
+}
+
+#endif // BPKG_BPKG_HXX
diff --git a/bpkg/buildfile b/bpkg/buildfile
index cb09ca9..0ba60dc 100644
--- a/bpkg/buildfile
+++ b/bpkg/buildfile
@@ -8,17 +8,33 @@
define xml: file
xml{*}: extension = xml
-import libs = libbpkg%lib{bpkg}
+import libs = build2%lib{build2}
+
+# NOTE: see also module loading in bpkg.cxx if adding anything here.
+#
+for m: bash bin c cc cli cxx in version
+ import libs += build2%lib{build2-$m}
+
+import libs += libbpkg%lib{bpkg}
import libs += libbutl%lib{butl}
import libs += libodb%lib{odb}
import libs += libodb-sqlite%lib{odb-sqlite}
+# @@ TMP Only required for the database migrations to schema versions 13 and
+# 14.
+#
+import libs += libsqlite3%lib{sqlite3}
+
options_topics = \
bpkg-options \
cfg-create-options \
+cfg-info-options \
+cfg-link-options \
+cfg-unlink-options \
common-options \
configuration-options \
help-options \
+pkg-bindist-options \
pkg-build-options \
pkg-checkout-options \
pkg-clean-options \
@@ -82,8 +98,13 @@ for t: cxx{**.test...}
# Build options.
#
-obj{utility}: cxx.poptions += -DBPKG_EXE_PREFIX='"'$bin.exe.prefix'"' \
--DBPKG_EXE_SUFFIX='"'$bin.exe.suffix'"'
+
+#cxx.poptions += -DBPKG_OUTPROC_CONFIGURE
+
+obj{utility}: cxx.poptions += \
+"-DBPKG_EXE_PREFIX=\"$bin.exe.prefix\"" \
+"-DBPKG_EXE_SUFFIX=\"$bin.exe.suffix\"" \
+"-DBPKG_HOST_TRIPLET=\"$cxx.target\""
# Pass the copyright notice extracted from the LICENSE file.
#
@@ -92,7 +113,7 @@ copyright = $process.run_regex( \
'Copyright \(c\) (.+) \(see the AUTHORS and LEGAL files\)\.', \
'\1')
-obj{bpkg}: cxx.poptions += -DBPKG_COPYRIGHT=\"$copyright\"
+obj{bpkg}: cxx.poptions += "-DBPKG_COPYRIGHT=\"$copyright\""
# Disable "unknown pragma" warnings.
#
@@ -110,10 +131,34 @@ switch $cxx.class
if ($cxx.id == 'msvc' && $cxx.version.major == 19 && $cxx.version.minor < 10)
cxx.coptions += /wd4503
-# Make sure backtrace includes function names.
+# Similar option to the build system driver.
#
-if ($cxx.target.class == 'linux')
- cxx.loptions += -rdynamic
+switch $cxx.target.class
+{
+ case 'linux'
+ {
+ # Make sure backtrace includes function names.
+ #
+ if ($cc.stdlib == 'glibc')
+ cxx.loptions += -rdynamic
+
+ # Make sure we use RPATH and not RUNPATH since the latter messes up
+ # dlopen().
+ #
+ cxx.loptions += -Wl,--disable-new-dtags
+ }
+ case 'windows'
+ {
+ # Adjust stack size (affects all threads).
+ #
+ # 8M 4M
+ stack_size = ($cxx.target.cpu == "x86_64" ? 8388608 : 4194304)
+
+ cxx.loptions += ($cxx.target.system == 'win32-msvc' \
+ ? "/STACK:$stack_size" \
+ : "-Wl,--stack,$stack_size")
+ }
+}
# Generated options parser.
#
@@ -131,6 +176,7 @@ if $cli.configured
# pkg-* command.
#
+ cli.cxx{pkg-bindist-options}: cli{pkg-bindist}
cli.cxx{pkg-build-options}: cli{pkg-build}
cli.cxx{pkg-checkout-options}: cli{pkg-checkout}
cli.cxx{pkg-clean-options}: cli{pkg-clean}
@@ -150,6 +196,9 @@ if $cli.configured
# cfg-* command.
#
cli.cxx{cfg-create-options}: cli{cfg-create}
+ cli.cxx{cfg-info-options}: cli{cfg-info}
+ cli.cxx{cfg-link-options}: cli{cfg-link}
+ cli.cxx{cfg-unlink-options}: cli{cfg-unlink}
# rep-* command.
#
@@ -169,21 +218,36 @@ if $cli.configured
# Option length must be the same to get commands/topics/options aligned.
#
+ # Need global --suppress-undocumented because of few undocumented options
+ # in common.cli.
+ #
+ #
cli.options += --std c++11 -I $src_root --include-with-brackets \
--include-prefix bpkg --guard-prefix BPKG \
--cxx-prologue "#include <bpkg/types-parsers.hxx>" --cli-namespace bpkg::cli \
--generate-vector-scanner --generate-file-scanner --generate-group-scanner \
--keep-separator --generate-specifier --generate-parse --generate-merge \
---page-usage 'bpkg::print_$name$_' --ansi-color --include-base-last \
---option-length 24
+--page-usage 'bpkg::print_$name$_' --ansi-color --ascii-tree \
+--include-base-last --suppress-undocumented --option-length 25
- cli.cxx{common-options}: cli.options += --short-usage --long-usage # Both.
- cli.cxx{bpkg-options}: cli.options += --short-usage --suppress-undocumented
+ # Both --*-usage options.
+ #
+ cli.cxx{common-options}: cli.options += --short-usage --long-usage \
+--generate-modifier
+
+ cli.cxx{bpkg-options}: cli.options += --short-usage
cli.options += --long-usage # All other pages -- long usage.
- cli.cxx{pkg-build-options}: cli.options += --class-doc \
-bpkg::pkg_build_pkg_options=exclude-base --generate-modifier
+ cli.cxx{pkg-build-options}: cli.options += --generate-modifier \
+--class-doc bpkg::pkg_build_pkg_options=exclude-base
+
+ cli.cxx{pkg-bindist-options}: cli.options += \
+--class-doc bpkg::pkg_bindist_common_options=exclude-base \
+--class-doc bpkg::pkg_bindist_debian_options=exclude-base \
+--class-doc bpkg::pkg_bindist_fedora_options=exclude-base \
+--class-doc bpkg::pkg_bindist_archive_options=exclude-base \
+--omit-link-check --link-regex '%#.+%%'
# Avoid generating CLI runtime and empty inline file for help topics.
#
diff --git a/bpkg/cfg-create.cli b/bpkg/cfg-create.cli
index 0b32604..990bd08 100644
--- a/bpkg/cfg-create.cli
+++ b/bpkg/cfg-create.cli
@@ -51,6 +51,40 @@ namespace bpkg
\
bpkg create cxx. \"?cli\"
\
+
+ Configurations can be linked with each other to allow a package to be
+ built in one configuration while its dependencies in one or more linked
+ configurations. This can be used to create a \"base\" configuration with
+ common dependencies that are shared between multiple configurations. This
+ mechanism is also used to provide a host configuration that is used to
+ build build-time dependencies.
+
+ Each configuration is assigned an automatically-generated UUID unless one
+ is specified with the \cb{--uuid} option. This UUID is used to check the
+ integrity of configuration links. For convenience of referring to linked
+ configurations, a configuration can also be assigned a name with the
+ \cb{--name} option.
+
+ A configuration also has a type specified with the \cb{--type} option.
+ Three predefined types are \cb{target}, \cb{host}, and \cb{build2}. If
+ the type is not specified explicitly, then \cb{target} is assumed. When
+ satisfying a dependency of one package on another, a linked configuration
+ will only be considered if (1) it has the same type as the other
+ configuration for run-time dependencies, (2) it has the \cb{host} type
+ for regular build-time dependencies, and (3) it has the \cb{build2} type
+ for build system module build-time dependencies. Note that a host
+ configuration is a target configuration for the host machine. So to
+ create a self-hosted configuration, use type \cb{host}.
+
+ To link a configuration we use the \l{bpkg-cfg-link(1)} command. As a
+ shortcut, host and build system module configurations can also be linked
+ during the configuration creation with the \cb{--host-config} and
+ \cb{--build2-config} options, respectively. If a build-time dependency is
+ encountered in a configuration that has no linked configuration of a
+ suitable type (\cb{host} or \cb{build2}, nor is itself of a suitable
+ type), then a private host or build system module configuration named
+ \cb{host} or \cb{build2}, respectively, is created automatically inside
+ the configuration's \c{.bpkg/} subdirectory.
"
}
@@ -77,6 +111,54 @@ namespace bpkg
configuration. For safety, this option requires that you specify the
configuration directory explicitly with \cb{--directory|-d}."
}
+
+ dir_path --host-config
+ {
+ "<dir>",
+ "Link the specified host configuration with the configuration being
+ created as if by running the \l{bpkg-cfg-link(1)} command."
+ }
+
+ bool --no-host-config
+ {
+ "Ignore any specified \cb{--host-config} options."
+ }
+
+ dir_path --build2-config
+ {
+ "<dir>",
+ "Link the specified build system module configuration with the
+ configuration being created as if by running the \l{bpkg-cfg-link(1)}
+ command."
+ }
+
+ bool --no-build2-config
+ {
+ "Ignore any specified \cb{--build2-config} options."
+ }
+
+ string --name
+ {
+ "<name>",
+ "The name of the configuration being created. If this configuration is
+ linked with another configuration using \l{bpkg-cfg-link(1)}, this name
+ will be used as the link name unless overridden. By default the
+ configuration is created unnamed."
+ }
+
+ string --type = "target"
+ {
+ "<type>",
+ "The type of the configuration being created. By default, configuration
+ of type \cb{target} is created."
+ }
+
+ uuid_type --uuid
+ {
+ "<uuid>",
+ "Use the specified UUID as the configuration id instead of generating
+ one automatically."
+ }
};
"
diff --git a/bpkg/cfg-create.cxx b/bpkg/cfg-create.cxx
index f3ca80d..f125e43 100644
--- a/bpkg/cfg-create.cxx
+++ b/bpkg/cfg-create.cxx
@@ -8,29 +8,72 @@
#include <bpkg/database.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/cfg-link.hxx>
+
using namespace std;
+using namespace butl;
namespace bpkg
{
- int
- cfg_create (const cfg_create_options& o, cli::scanner& args)
+ shared_ptr<configuration>
+ cfg_create (const common_options& o,
+ const dir_path& c,
+ optional<string> name,
+ string type,
+ const strings& mods,
+ const strings& vars,
+ bool existing,
+ bool wipe,
+ optional<uuid> uid,
+ const optional<dir_path>& host_config,
+ const optional<dir_path>& build2_config)
{
tracer trace ("cfg_create");
- if (o.existing () && o.wipe ())
- fail << "both --existing|-e and --wipe specified";
+ // Stash and restore the current transaction, if any.
+ //
+ namespace sqlite = odb::sqlite;
- if (o.wipe () && !o.directory_specified ())
- fail << "--wipe requires explicit --directory|-d";
+ sqlite::transaction* ct (nullptr);
+ if (sqlite::transaction::has_current ())
+ {
+ ct = &sqlite::transaction::current ();
+ sqlite::transaction::reset_current ();
+ }
- dir_path c (o.directory ());
- l4 ([&]{trace << "creating configuration in " << c;});
+ auto tg (make_guard ([ct] ()
+ {
+ if (ct != nullptr)
+ sqlite::transaction::current (*ct);
+ }));
+
+ // First, let's verify the host/build2 configurations existence and types
+ // and normalize their paths.
+ //
+ auto norm = [&trace] (const dir_path& d, const string& t)
+ {
+ dir_path r (normalize (d, string (t + " configuration").c_str ()));
+ database db (r, trace, false /* pre_attach */);
+ if (db.type != t)
+ fail << t << " configuration " << r << " is of '" << db.type
+ << "' type";
+
+ return r;
+ };
+
+ optional<dir_path> hc (host_config
+ ? norm (*host_config, host_config_type)
+ : optional<dir_path> ());
+
+ optional<dir_path> bc (build2_config
+ ? norm (*build2_config, build2_config_type)
+ : optional<dir_path> ());
// Verify the existing directory is compatible with our mode.
//
if (exists (c))
{
- if (o.existing ())
+ if (existing)
{
// Bail if the .bpkg/ directory already exists and is not empty.
//
@@ -49,7 +92,7 @@ namespace bpkg
//
if (!empty (c))
{
- if (!o.wipe ())
+ if (!wipe)
fail << "directory " << c << " is not empty" <<
info << "use --wipe to clean it up but be careful";
@@ -65,29 +108,9 @@ namespace bpkg
mk_p (c);
}
- // Sort arguments into modules and configuration variables.
- //
- strings mods;
- strings vars;
- while (args.more ())
- {
- string a (args.next ());
-
- if (a.find ('=') != string::npos)
- {
- vars.push_back (move (a));
- }
- else if (!a.empty ())
- {
- mods.push_back (move (a));
- }
- else
- fail << "empty string as argument";
- }
-
// Create and configure.
//
- if (o.existing ())
+ if (existing)
{
if (!mods.empty ())
fail << "module '" << mods[0] << "' specified with --existing|-e";
@@ -99,7 +122,7 @@ namespace bpkg
{
// Assemble the build2 create meta-operation parameters.
//
- string params ("'" + c.representation () + "'");
+ string params ('\'' + c.representation () + '\'');
if (!mods.empty ())
{
params += ',';
@@ -112,7 +135,7 @@ namespace bpkg
// Run quiet. Use path representation to get canonical trailing slash.
//
- run_b (o, verb_b::quiet, vars, "create(" + params + ")");
+ run_b (o, verb_b::quiet, vars, "create(" + params + ')');
}
// Create .bpkg/ and its subdirectories.
@@ -149,7 +172,20 @@ namespace bpkg
// Create the database.
//
- database db (open (c, trace, true));
+ shared_ptr<configuration> r (make_shared<configuration> (move (name),
+ move (type),
+ uid));
+
+ dir_paths pre_link;
+
+ if (hc)
+ pre_link.push_back (*hc);
+
+ if (bc)
+ pre_link.push_back (*bc);
+
+ database db (c, r, trace, pre_link);
+ transaction t (db);
// Add the special, root repository object with empty location and
// containing a single repository fragment having an empty location as
@@ -161,31 +197,103 @@ namespace bpkg
// locations and as a search starting point for held packages (see
// pkg-build for details).
//
- transaction t (db);
-
shared_ptr<repository_fragment> fr (
make_shared<repository_fragment> (repository_location ()));
db.persist (fr);
- shared_ptr<repository> r (
+ shared_ptr<repository> rep (
make_shared<repository> (repository_location ()));
- r->fragments.push_back (
+ rep->fragments.push_back (
repository::fragment_type {string () /* friendly_name */, move (fr)});
- db.persist (r);
+ db.persist (rep);
+
+ if (hc)
+ cfg_link (db, *hc, host_config->relative (), nullopt /* name */);
+
+ if (bc)
+ cfg_link (db, *bc, build2_config->relative (), nullopt /* name */);
t.commit ();
+ return r;
+ }
+
+ int
+ cfg_create (const cfg_create_options& o, cli::scanner& args)
+ {
+ tracer trace ("cfg_create");
+
+ if (o.name_specified ())
+ validate_configuration_name (o.name (), "--name option value");
+
+ if (o.type ().empty ())
+ fail << "empty --type option value";
+
+ if (o.existing () && o.wipe ())
+ fail << "both --existing|-e and --wipe specified";
+
+ if (o.wipe () && !o.directory_specified ())
+ fail << "--wipe requires explicit --directory|-d";
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "creating configuration in " << c;});
+
+ // Sort arguments into modules and configuration variables.
+ //
+ strings mods;
+ strings vars;
+ while (args.more ())
+ {
+ string a (args.next ());
+
+ if (a.find ('=') != string::npos)
+ vars.push_back (move (trim (a)));
+ else if (!a.empty ())
+ mods.push_back (move (a));
+ else
+ fail << "empty string as argument";
+ }
+
+ // Auto-generate the configuration UUID, unless it is specified
+ // explicitly.
+ //
+ shared_ptr<configuration> cf (
+ cfg_create (
+ o,
+ c,
+ o.name_specified () ? o.name () : optional<string> (),
+ o.type (),
+ mods,
+ vars,
+ o.existing (),
+ o.wipe (),
+ o.uuid_specified () ? o.uuid () : optional<uuid> (),
+ (o.host_config_specified () && !o.no_host_config ()
+ ? o.host_config ()
+ : optional<dir_path> ()),
+ (o.build2_config_specified () && !o.no_build2_config ()
+ ? o.build2_config ()
+ : optional<dir_path> ())));
+
if (verb && !o.no_result ())
{
normalize (c, "configuration");
+ diag_record dr (text);
+
if (o.existing ())
- text << "initialized existing configuration in " << c;
+ dr << "initialized existing configuration in " << c << '\n';
else
- text << "created new configuration in " << c;
+ dr << "created new configuration in " << c << '\n';
+
+ dr << " uuid: " << cf->uuid << '\n'
+ << " type: " << cf->type;
+
+ if (cf->name)
+ dr << "\n name: " << *cf->name;
}
return 0;
diff --git a/bpkg/cfg-create.hxx b/bpkg/cfg-create.hxx
index 9831160..b861732 100644
--- a/bpkg/cfg-create.hxx
+++ b/bpkg/cfg-create.hxx
@@ -5,6 +5,7 @@
#define BPKG_CFG_CREATE_HXX
#include <bpkg/types.hxx>
+#include <bpkg/forward.hxx> // configuration
#include <bpkg/utility.hxx>
#include <bpkg/cfg-create-options.hxx>
@@ -14,6 +15,27 @@ namespace bpkg
int
cfg_create (const cfg_create_options&, cli::scanner& args);
+ // Create a new bpkg configuration, initialize its database (add self-link,
+ // root repository, etc), and return this configuration information. See
+ // bpkg-cfg-create(1) for arguments semantics.
+ //
+ // If there is a current transaction already open, then stash it before the
+ // database initialization and restore it afterwards (used to create private
+ // configuration on demand).
+ //
+ shared_ptr<configuration>
+ cfg_create (const common_options&,
+ const dir_path&,
+ optional<string> name,
+ string type,
+ const strings& mods,
+ const strings& vars,
+ bool existing,
+ bool wipe,
+ optional<uuid> uid = nullopt,
+ const optional<dir_path>& host_config = nullopt,
+ const optional<dir_path>& build2_config = nullopt);
+
default_options_files
options_files (const char* cmd,
const cfg_create_options&,
diff --git a/bpkg/cfg-info.cli b/bpkg/cfg-info.cli
new file mode 100644
index 0000000..8801ec9
--- /dev/null
+++ b/bpkg/cfg-info.cli
@@ -0,0 +1,103 @@
+// file : bpkg/cfg-info.cli
+// license : MIT; see accompanying LICENSE file
+
+include <bpkg/configuration.cli>;
+
+"\section=1"
+"\name=bpkg-cfg-info"
+"\summary=print configuration information"
+
+namespace bpkg
+{
+ {
+ "<options> <dir>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{bpkg cfg-info} [<options>]}
+
+ \h|DESCRIPTION|
+
+ The \cb{cfg-info} command prints the current configuration's absolute
+ path, id, type, and name. If the \cb{--link} and/or \cb{--backlink}
+ options are specified, then this information is also printed for each
+ linked and/or implicitly backlinked configuration, if any. Note that the
+ dangling implicit backlinks are silently skipped, unless \cb{--dangling}
+ is specified, in which case this information is also printed for them.
+ Note that the information is written to \cb{stdout}, not \cb{stderr}.
+
+ If the \cb{--recursive} option is specified together with \cb{--link}
+ and/or \cb{--backlink}, then this information is printed for linked
+ and/or implicitly backlinked configurations, recursively.
+
+ The output format is regular with each value printed on a separate line
+ and prefixed with the value name. If the \cb{--link}, \cb{--backlink},
+ and/or \cb{--dangling} options are specified, then information blocks
+ corresponding to linked configurations are separated with blank
+ lines. For example:
+
+ \
+ path: /path/to/cfg/
+ uuid: 8d439f03-7342-4502-8b1c-74b173869478
+ type: target
+ name: foo
+
+ path: /path/to/host-cfg/
+ uuid: 7ee4dab3-07d9-4163-81c0-3779166a7213
+ type: host
+ name: tools
+
+ path: /path/to/build2-cfg/
+ uuid: d453aa2a-92c4-4066-87e4-c8672eed06e1
+ type: build2
+ name: modules
+ \
+ "
+ }
+
+ class cfg_info_options: configuration_options
+ {
+ "\h|CFG-INFO OPTIONS|"
+
+ bool --link
+ {
+ "Print linked configurations."
+ }
+
+ bool --backlink
+ {
+ "Print implicitly backlinked configurations."
+ }
+
+ bool --dangling
+ {
+ "Print dangling implicit backlinks."
+ }
+
+ bool --recursive
+ {
+ "Print linked configurations recursively."
+ }
+ };
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ See \l{bpkg-default-options-files(1)} for an overview of the default
+ options files. For the \cb{cfg-info} command the search start directory is
+ the configuration directory. The following options files are searched for
+ in each directory and, if found, loaded in the order listed:
+
+ \
+ bpkg.options
+ bpkg-cfg-info.options
+ \
+
+ The following \cb{cfg-info} command options cannot be specified in the
+ default options files:
+
+ \
+ --directory|-d
+ \
+ "
+}
diff --git a/bpkg/cfg-info.cxx b/bpkg/cfg-info.cxx
new file mode 100644
index 0000000..fc65b7b
--- /dev/null
+++ b/bpkg/cfg-info.cxx
@@ -0,0 +1,129 @@
+// file : bpkg/cfg-info.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/cfg-info.hxx>
+
+#include <set>
+#include <iostream> // cout
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ int
+ cfg_info (const cfg_info_options& o, cli::scanner&)
+ {
+ tracer trace ("cfg_info");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ if (o.recursive () && !o.link () && !o.backlink ())
+ fail << "--recursive requires --link or --backlink";
+
+ try
+ {
+ cout.exceptions (ostream::badbit | ostream::failbit);
+
+ // Print the configuration information and return true, unless it has
+ // already been printed, in which case return false.
+ //
+ auto print = [first = true,
+ printed = set<dir_path> {}]
+ (const dir_path& path,
+ const uuid& uid,
+ const string& type,
+ const optional<string>& name) mutable
+ {
+ if (!printed.insert (path).second)
+ return false;
+
+ if (!first)
+ cout << endl;
+ else
+ first = false;
+
+ cout << "path: " << path << endl
+ << "uuid: " << uid << endl
+ << "type: " << type << endl
+ << "name: " << (name ? *name : "") << endl;
+
+ return true;
+ };
+
+ using query = odb::query<configuration>;
+
+ query q (false);
+
+ if (o.link ())
+ q = q || query::expl;
+
+ if (o.backlink () || o.dangling ())
+ q = q || (!query::expl && query::id != 0);
+
+ // Make the output consistent across runs.
+ //
+ q = q + "ORDER BY" + query::id;
+
+ auto print_db = [&o, &q, &print] (database& db,
+ bool links,
+ const auto& print_db)
+ {
+ if (!print (db.config, db.uuid, db.type, db.name))
+ return;
+
+ if (links)
+ {
+ for (auto& c: db.query<configuration> (q))
+ {
+ const dir_path& d (c.make_effective_path (db.config));
+
+ auto print_link = [&o, &db, &c, &print_db] ()
+ {
+ database& ldb (db.attach (c.path));
+ db.verify_link (c, ldb);
+
+ // While at it, also verify the backlink.
+ //
+ if (c.expl)
+ db.backlink (ldb);
+
+ print_db (ldb, o.recursive (), print_db);
+ };
+
+ if (c.expl)
+ {
+ if (o.link ())
+ print_link ();
+ }
+ else if (exists (d))
+ {
+ if (o.backlink ())
+ print_link ();
+ }
+ else if (o.dangling ())
+ print (d, c.uuid, c.type, c.name);
+ }
+ }
+ };
+
+ database db (c, trace, false /* pre_attach */);
+ transaction t (db);
+
+ print_db (db, o.link () || o.backlink () || o.dangling (), print_db);
+
+ t.commit ();
+ }
+ catch (const io_error&)
+ {
+ fail << "unable to write to stdout";
+ }
+
+ return 0;
+ }
+}
diff --git a/bpkg/cfg-info.hxx b/bpkg/cfg-info.hxx
new file mode 100644
index 0000000..d4a8818
--- /dev/null
+++ b/bpkg/cfg-info.hxx
@@ -0,0 +1,18 @@
+// file : bpkg/cfg-info.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_CFG_INFO_HXX
+#define BPKG_CFG_INFO_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/cfg-info-options.hxx>
+
+namespace bpkg
+{
+ int
+ cfg_info (const cfg_info_options&, cli::scanner& args);
+}
+
+#endif // BPKG_CFG_INFO_HXX
diff --git a/bpkg/cfg-link.cli b/bpkg/cfg-link.cli
new file mode 100644
index 0000000..906a4d5
--- /dev/null
+++ b/bpkg/cfg-link.cli
@@ -0,0 +1,83 @@
+// file : bpkg/cfg-link.cli
+// license : MIT; see accompanying LICENSE file
+
+include <bpkg/configuration.cli>;
+
+"\section=1"
+"\name=bpkg-cfg-link"
+"\summary=link configuration"
+
+namespace bpkg
+{
+ {
+ "<options> <dir>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{bpkg cfg-link} [<options>] <dir>}
+
+ \h|DESCRIPTION|
+
+ The \cb{cfg-link} command links the specified \cb{bpkg} configuration
+ with the current configuration. Note that it also establishes an implicit
+ backlink from the specified to the current configuration. See
+ \l{bpkg-cfg-create(1)} for background on linked configurations. To unlink
+ previously linked configurations use \l{bpkg-cfg-unlink(1)}.
+
+ The linked configurations are normally referred to using names when
+ specified on the \cb{bpkg} command line. Unless overridden with the
+ \cb{--name} option, the original configuration name is used to name the
+ link. If the link is unnamed, then it can be referred to using the
+ numeric id that is automatically assigned when establishing the link or
+ using the configuration UUID.
+
+ If the specified configuration path is relative, then it is rebased
+ relative to the current configuration directory. This way, when the
+ linked configurations are moved around together, the stored relative
+ paths remain valid. If the specified directory path is absolute, then it
+ is stored as absolute unless the \cb{--relative} option is specified in
+ which case it is also rebased relative to the current configuration
+ directory.
+ "
+ }
+
+ class cfg_link_options: configuration_options
+ {
+ "\h|CFG-LINK OPTIONS|"
+
+ string --name
+ {
+ "<name>",
+ "Alternative link name. If this option is not specified, then the
+ configuration name is used as the link name (see \l{bpkg-cfg-create(1)}
+ for details)."
+ }
+
+ bool --relative
+ {
+ "Rebase the absolute linked configuration path relative to the current
+ configuration directory."
+ }
+ };
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ See \l{bpkg-default-options-files(1)} for an overview of the default
+ options files. For the \cb{cfg-link} command the search start directory is
+ the configuration directory. The following options files are searched for
+ in each directory and, if found, loaded in the order listed:
+
+ \
+ bpkg.options
+ bpkg-cfg-link.options
+ \
+
+ The following \cb{cfg-link} command options cannot be specified in the
+ default options files:
+
+ \
+ --directory|-d
+ \
+ "
+}
diff --git a/bpkg/cfg-link.cxx b/bpkg/cfg-link.cxx
new file mode 100644
index 0000000..1146d22
--- /dev/null
+++ b/bpkg/cfg-link.cxx
@@ -0,0 +1,321 @@
+// file : bpkg/cfg-link.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/cfg-link.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ shared_ptr<configuration>
+ cfg_link (database& db,
+ const dir_path& ld,
+ bool rel,
+ optional<string> name,
+ bool sys_rep)
+ {
+ tracer trace ("cfg_link");
+
+ bool name_specified (name);
+ const dir_path& cd (db.config); // Note: absolute and normalized.
+
+ // Load the self-link object from the database of the configuration being
+ // linked to obtain its name, type, and uuid.
+ //
+ database& ldb (db.attach (ld, sys_rep));
+
+ string type;
+ uuid uid;
+ {
+ shared_ptr<configuration> cf (ldb.load<configuration> (0));
+
+ type = move (cf->type);
+ uid = cf->uuid;
+
+ if (!name)
+ name = move (cf->name);
+ }
+
+ if (db.uuid == uid)
+ fail << "linking configuration " << ld << " with itself" <<
+ info << "uuid: " << uid;
+
+ if (name && name == db.name)
+ fail << "linking configuration " << ld << " using current "
+ << "configuration name '" << *name << "'" <<
+ info << "consider specifying alternative name with --name";
+
+ // Verify that the name and path of the configuration being linked do not
+ // clash with already linked configurations. Fail if a configuration with
+ // this uuid is already linked unless the link is implicit, in which case
+ // make it explicit and update its name and path.
+ //
+ // Note that when we make an implicit link explicit, we start treating it
+ // as both implicit and explicit simultaneously. So, for example, for cfg1
+ // the link cfg2 is explicit and the link cfg3 is both explicit and
+ // implicit:
+ //
+ // cfg2 <- cfg1 <-> cfg3
+ //
+ // Similarly, if we link cfg1 with cfg2, the explicit link cfg2 in cfg1 also
+ // becomes both explicit and implicit, not being amended directly.
+ //
+ shared_ptr<configuration> lcf;
+
+ using query = query<configuration>;
+
+ for (shared_ptr<configuration> lc:
+ pointer_result (db.query<configuration> (query::id != 0)))
+ {
+ if (uid == lc->uuid)
+ {
+ if (lc->expl)
+ fail << "configuration with uuid " << uid << " is already linked "
+ << "as " << lc->path;
+
+ // Verify the existing implicit link integrity and cache it to update
+ // later, when the name/path clash check is complete.
+ //
+ db.verify_link (*lc, ldb);
+
+ lcf = move (lc);
+ continue;
+ }
+
+ if (ld == lc->effective_path (cd))
+ fail << "configuration with path " << ld << " is already linked";
+
+ // If the name clashes, then fail if it was specified by the user and
+ // issue a warning and link the configuration as unnamed otherwise.
+ //
+ if (name && name == lc->name)
+ {
+ diag_record dr (name_specified ? error : warn);
+ dr << "configuration with name " << *name << " is already linked as "
+ << lc->path;
+
+ if (name_specified)
+ {
+ dr << info << "consider specifying alternative name with --name"
+ << endf;
+ }
+ else
+ {
+ dr << ", linking as unnamed";
+ name = nullopt;
+ }
+ }
+ }
+
+ // If requested, rebase the first path relative to the second or return it
+ // as is otherwise. Fail if the rebase is not possible (e.g., paths are on
+ // different drives on Windows).
+ //
+ auto rebase = [rel] (const dir_path& x, const dir_path& y) -> dir_path
+ {
+ try
+ {
+ return rel ? x.relative (y) : x;
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to rebase " << x << " relative to " << y <<
+ info << "specify absolute configuration directory path to save it "
+ << "as absolute" << endf;
+ }
+ };
+
+ // If the implicit link already exists, then make it explicit and update
+ // its name and path. Otherwise, create a new link.
+ //
+ // Note that in the former case the current configuration must already be
+ // explicitly linked with the configuration being linked. We verify that
+ // and the link integrity.
+ //
+ if (lcf != nullptr)
+ {
+ // Verify the backlink integrity.
+ //
+ shared_ptr<configuration> cf (
+ ldb.query_one<configuration> (query::uuid == db.uuid.string ()));
+
+ // Note: both sides of the link cannot be implicit.
+ //
+ if (cf == nullptr || !cf->expl)
+ fail << "configuration " << ld << " is already implicitly linked but "
+ << "current configuration " << cd << " is not explicitly linked "
+ << "with it";
+
+ ldb.verify_link (*cf, db);
+
+ // Finally, turn the implicit link into explicit.
+ //
+ // Note: reuse id.
+ //
+ lcf->expl = true;
+ lcf->name = move (name);
+ lcf->path = rebase (ld, cd); // Note: can't clash (see above).
+
+ db.update (lcf);
+ }
+ else
+ {
+ // If the directory path of the configuration being linked is relative
+ // or the --relative option is specified, then rebase it relative to the
+ // current configuration directory path.
+ //
+ lcf = make_shared<configuration> (uid,
+ move (name),
+ move (type),
+ rebase (ld, cd),
+ true /* explicit */);
+
+ db.persist (lcf);
+
+ // Now implicitly link ourselves with the just linked configuration.
+ // Note that we link ourselves as unnamed.
+ //
+ shared_ptr<configuration> ccf (db.load<configuration> (0));
+
+ // What if we find the current configuration to already be implicitly
+ // linked? The potential scenario could be, that the current
+ // configuration was recreated from scratch, previously being implicitly
+ // linked with the configuration we currently link. It feels like in
+ // this case we would rather overwrite the existing dead implicit link
+ // than just fail. Let's also warn for good measure.
+ //
+ shared_ptr<configuration> cf;
+
+ for (shared_ptr<configuration> lc:
+ pointer_result (ldb.query<configuration> (query::id != 0)))
+ {
+ if (cd == lc->make_effective_path (ld))
+ {
+ if (lc->expl)
+ fail << "current configuration " << cd << " is already linked "
+ << "with " << ld;
+
+ warn << "current configuration " << cd << " is already implicitly "
+ << "linked with " << ld;
+
+ cf = move (lc);
+ continue;
+ }
+
+ if (ccf->uuid == lc->uuid)
+ fail << "current configuration " << ccf->uuid << " is already "
+ << "linked with " << ld;
+ }
+
+ // It feels natural to persist both the explicitly and implicitly linked
+ // configuration paths either as relative or as absolute.
+ //
+ if (cf != nullptr)
+ {
+ // The dead implicit link case.
+ //
+ // Note: reuse id.
+ //
+ cf->uuid = ccf->uuid;
+ cf->type = move (ccf->type);
+ cf->path = rebase (cd, ld);
+
+ ldb.update (cf);
+ }
+ else
+ {
+ ccf = make_shared<configuration> (ccf->uuid,
+ nullopt /* name */,
+ move (ccf->type),
+ rebase (cd, ld),
+ false /* explicit */);
+
+ ldb.persist (ccf);
+ }
+ }
+
+ // If explicit links of the current database are pre-attached, then also
+ // pre-attach explicit links of the newly linked database.
+ //
+ linked_configs& lcs (db.explicit_links ());
+
+ if (!lcs.empty ())
+ {
+ lcs.push_back (linked_config {*lcf->id, lcf->name, ldb});
+ ldb.attach_explicit (sys_rep);
+ }
+
+ // If the implicit links of the linked database are already cached, then
+ // also cache the current database, unless it is already there (see above
+ // for the dead link case).
+ //
+ linked_databases& lds (ldb.implicit_links (false /* attach */));
+
+ if (!lds.empty () && find (lds.begin (), lds.end (), db) == lds.end ())
+ lds.push_back (db);
+
+ return lcf;
+ }
+
+ int
+ cfg_link (const cfg_link_options& o, cli::scanner& args)
+ try
+ {
+ tracer trace ("cfg_link");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ if (o.name_specified ())
+ validate_configuration_name (o.name (), "--name option value");
+
+ if (!args.more ())
+ fail << "configuration directory argument expected" <<
+ info << "run 'bpkg help cfg-link' for more information";
+
+ dir_path ld (args.next ());
+ if (ld.empty ())
+ throw invalid_path (ld.string ());
+
+ l4 ([&]{trace << "link configuration: " << ld;});
+
+ bool rel (ld.relative () || o.relative ());
+ normalize (ld, "specified linked configuration");
+
+ database db (c, trace, false /* pre_attach */, false /* sys_rep */, {ld});
+ transaction t (db);
+
+ shared_ptr<configuration> lc (
+ cfg_link (db,
+ ld,
+ rel,
+ o.name_specified () ? o.name () : optional<string> ()));
+
+ t.commit ();
+
+ if (verb && !o.no_result ())
+ {
+ diag_record dr (text);
+
+ dr << "linked with configuration " << ld << '\n'
+ << " uuid: " << lc->uuid << '\n'
+ << " type: " << lc->type << '\n';
+
+ if (lc->name)
+ dr << " name: " << *lc->name << '\n';
+
+ dr << " id: " << *lc->id;
+ }
+
+ return 0;
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path: '" << e.path << "'" << endf;
+ }
+}
diff --git a/bpkg/cfg-link.hxx b/bpkg/cfg-link.hxx
new file mode 100644
index 0000000..ee625fa
--- /dev/null
+++ b/bpkg/cfg-link.hxx
@@ -0,0 +1,40 @@
+// file : bpkg/cfg-link.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_CFG_LINK_HXX
+#define BPKG_CFG_LINK_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/forward.hxx> // configuration
+#include <bpkg/utility.hxx>
+
+#include <bpkg/cfg-link-options.hxx>
+
+namespace bpkg
+{
+ int
+ cfg_link (const cfg_link_options&, cli::scanner& args);
+
+ // Link the configuration specified as the directory path with the current
+ // configuration, attach the linked configuration database, and return the
+ // link. Note that it also establishes an implicit backlink of the current
+ // configuration with the linked one.
+ //
+ // The specified configuration path must be absolute and normalized. If the
+ // relative argument is true, then rebase this path relative to the current
+ // configuration directory path and fail if that's not possible (different
+ // drive on Windows, etc).
+ //
+ // If the current configuration database has its explicit links pre-
+ // attached, then also pre-attach explicit links of the newly linked
+ // database.
+ //
+ shared_ptr<configuration>
+ cfg_link (database&,
+ const dir_path&,
+ bool relative,
+ optional<string> name,
+ bool sys_rep = false);
+}
+
+#endif // BPKG_CFG_LINK_HXX
diff --git a/bpkg/cfg-unlink.cli b/bpkg/cfg-unlink.cli
new file mode 100644
index 0000000..6514882
--- /dev/null
+++ b/bpkg/cfg-unlink.cli
@@ -0,0 +1,81 @@
+// file : bpkg/cfg-unlink.cli
+// license : MIT; see accompanying LICENSE file
+
+include <bpkg/configuration.cli>;
+
+"\section=1"
+"\name=bpkg-cfg-unlink"
+"\summary=unlink configuration"
+
+namespace bpkg
+{
+ {
+ "<options> <dir>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{bpkg cfg-unlink} [<options>] [<dir>]\n
+ \b{bpkg cfg-unlink} [<options>] \b{--dangling}}
+
+ \h|DESCRIPTION|
+
+ The \cb{cfg-unlink} command unlinks the specified \cb{bpkg} configuration
+ from the current configuration (the first form) or removes dangling
+ implicit backlinks (the second form). See \l{bpkg-cfg-create(1)} for
+ background on linked configurations.
+
+ In the first form the configuration to unlink can be specified either as
+ configuration directory (<dir>), name (\cb{--name}), id (\cb{--id}), or
+ UUID (\cb{--uuid}).
+ "
+ }
+
+ class cfg_unlink_options: configuration_options
+ {
+ "\h|CFG-UNLINK OPTIONS|"
+
+ string --name
+ {
+ "<name>",
+ "Name of the configuration to unlink."
+ }
+
+ uint64_t --id
+ {
+ "<id>",
+ "Numeric id of the configuration to unlink."
+ }
+
+ uuid_type --uuid
+ {
+ "<uuid>",
+ "UUID of the configuration to unlink."
+ }
+
+ bool --dangling
+ {
+ "Remove dangling implicit backlinks."
+ }
+ };
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ See \l{bpkg-default-options-files(1)} for an overview of the default
+ options files. For the \cb{cfg-unlink} command the search start directory
+ is the configuration directory. The following options files are searched
+ for in each directory and, if found, loaded in the order listed:
+
+ \
+ bpkg.options
+ bpkg-cfg-unlink.options
+ \
+
+ The following \cb{cfg-unlink} command options cannot be specified in the
+ default options files:
+
+ \
+ --directory|-d
+ \
+ "
+}
diff --git a/bpkg/cfg-unlink.cxx b/bpkg/cfg-unlink.cxx
new file mode 100644
index 0000000..52d8969
--- /dev/null
+++ b/bpkg/cfg-unlink.cxx
@@ -0,0 +1,292 @@
+// file : bpkg/cfg-unlink.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/cfg-unlink.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ static int
+ cfg_unlink_config (const cfg_unlink_options& o, cli::scanner& args)
+ try
+ {
+ tracer trace ("cfg_unlink_config");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ database mdb (c, trace, true /* pre_attach */);
+ transaction t (mdb);
+
+ // Find the configuration to be unlinked.
+ //
+ // Note that we exclude the current configuration from the search.
+ //
+ database& udb (o.name_specified () ? mdb.find_attached (o.name (), false) :
+ o.id_specified () ? mdb.find_attached (o.id (), false) :
+ o.uuid_specified () ? mdb.find_attached (o.uuid (), false) :
+ mdb.find_attached (
+ normalize (dir_path (args.next ()),
+ "specified linked configuration"),
+ false));
+
+ l4 ([&]{trace << "unlink configuration: " << udb.config;});
+
+ bool priv (udb.private_ ());
+
+ // If the configuration being unlinked contains any prerequisites of
+ // packages in other configurations, make sure that they will stay
+ // resolvable for their dependents after the configuration is unlinked
+ // (see _selected_package_ref::to_ptr() for the resolution details).
+ //
+ // Specifically, if the configuration being unlinked is private, make sure
+ // it doesn't contain any prerequisites of any dependents in any other
+ // configurations (since we will remove it). Otherwise, do not consider
+ // those dependent configurations which will still be linked with the
+ // unlinked configuration (directly or indirectly through some different
+ // path).
+ //
+ // So, for example, for the following link chain where cfg1 contains a
+ // dependent of a prerequisite in cfg3, unlinking cfg3 from cfg2 will
+ // result with the "cfg3 still depends on cfg1" error.
+ //
+ // cfg1 (target) -> cfg2 (target) -> cfg3 (host)
+ //
+ {
+ // Note: needs to come before the subsequent unlinking.
+ //
+ // Also note that this call also verifies integrity of the implicit
+ // links of the configuration being unlinked, which we rely upon below.
+ //
+ linked_databases dcs (udb.dependent_configs ());
+
+ // Unlink the configuration in the in-memory model, so we can evaluate
+ // if the dependent configurations are still linked with it.
+ //
+ // Note that we don't remove the backlink here, since this is not
+ // required for the check.
+ //
+ if (!priv)
+ {
+ linked_configs& ls (mdb.explicit_links ());
+
+ auto i (find_if (ls.begin (), ls.end (),
+ [&udb] (const linked_config& lc)
+ {
+ return lc.db == udb;
+ }));
+
+ assert (i != ls.end ()); // By definition.
+
+ ls.erase (i);
+ }
+
+ // Now go through the packages configured in the unlinked configuration
+ // and check if they have some dependents in other configurations which
+ // are now unable to resolve them as prerequisites. Issue diagnostics and
+ // fail if that's the case.
+ //
+ using query = query<selected_package>;
+
+ for (shared_ptr<selected_package> sp:
+ pointer_result (
+ udb.query<selected_package> (query::state == "configured")))
+ {
+ for (auto i (dcs.begin_linked ()); i != dcs.end (); ++i)
+ {
+ database& db (*i);
+
+ odb::result<package_dependent> ds (
+ query_dependents (db, sp->name, udb));
+
+ // Skip the dependent configuration if it doesn't contain any
+ // dependents of the package.
+ //
+ if (ds.empty ())
+ continue;
+
+ // Skip the dependent configuration if it is still (potentially
+ // indirectly) linked with the unlinked configuration.
+ //
+ if (!priv)
+ {
+ linked_databases cs (db.dependency_configs ());
+
+ if (find_if (cs.begin (), cs.end (),
+ [&udb] (const database& db)
+ {
+ return db == udb;
+ }) != cs.end ())
+ continue;
+ }
+
+ diag_record dr (fail);
+
+ dr << "configuration " << db.config_orig
+ << " still depends on " << (priv ? "private " : "")
+ << "configuration " << udb.config_orig <<
+ info << "package " << sp->name << udb << " has dependents:";
+
+ for (const package_dependent& pd: ds)
+ {
+ dr << info << "package " << pd.name << db;
+
+ if (pd.constraint)
+ dr << " on " << sp->name << " " << *pd.constraint;
+ }
+ }
+ }
+ }
+
+ // Now unlink the configuration for real, in the database.
+ //
+ // Specifically, load the current and the being unlinked configurations
+ // and remove their respective explicit and implicit links.
+ //
+ {
+ using query = query<configuration>;
+
+ // Explicit link.
+ //
+ shared_ptr<configuration> uc (
+ mdb.query_one<configuration> (query::uuid == udb.uuid.string ()));
+
+ // The integrity of the current configuration explicit links is verified
+ // by the database constructor.
+ //
+ assert (uc != nullptr);
+
+ // Implicit backlink.
+ //
+ shared_ptr<configuration> cc (
+ udb.query_one<configuration> (query::uuid == mdb.uuid.string ()));
+
+ // The integrity of the implicit links of the configuration being
+ // unlinked is verified by the above dependent_configs() call.
+ //
+ assert (cc != nullptr);
+
+ // If the backlink turns out to be explicit, then, unless the
+ // configuration being unlinked is private, we just turn the explicit
+ // link into an implicit one rather than remove the direct and back
+ // links.
+ //
+ if (cc->expl && !priv)
+ {
+ info << "configurations " << udb.config_orig << " and "
+ << mdb.config_orig << " are mutually linked, turning the link "
+ << "to " << udb.config_orig << " into implicit backlink";
+
+ uc->expl = false;
+ mdb.update (uc);
+ }
+ else
+ {
+ mdb.erase (uc);
+ udb.erase (cc);
+ }
+ }
+
+ t.commit ();
+
+ // If the unlinked configuration is private, then detach its database and
+ // remove its directory. But first, stash the directory path for the
+ // subsequent removal and diagnostics.
+ //
+ dir_path ud (udb.config);
+
+ if (priv)
+ {
+ mdb.detach_all ();
+ rm_r (ud);
+ }
+
+ if (verb && !o.no_result ())
+ text << "unlinked " << (priv ? "and removed " : "") << "configuration "
+ << ud;
+
+ return 0;
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path: '" << e.path << "'" << endf;
+ }
+
+ static int
+ cfg_unlink_dangling (const cfg_unlink_options& o, cli::scanner&)
+ {
+ tracer trace ("cfg_unlink_dangling");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ database db (c, trace, false /* pre_attach */);
+ transaction t (db);
+
+ using query = query<configuration>;
+
+ size_t count (0);
+ for (auto& c: db.query<configuration> (query::id != 0 && !query::expl))
+ {
+ if (!exists (c.effective_path (db.config)))
+ {
+ if (verb > 1)
+ text << "removing dangling implicit backlink " << c.path;
+
+ db.erase (c);
+ ++count;
+ }
+ }
+
+ t.commit ();
+
+ if (verb && !o.no_result ())
+ text << "removed " << count << " dangling implicit backlink(s)";
+
+ return 0;
+ }
+
+ int
+ cfg_unlink (const cfg_unlink_options& o, cli::scanner& args)
+ {
+ // Verify that the unlink mode is specified unambiguously.
+ //
+ // Points to the mode, if any is specified and NULL otherwise.
+ //
+ const char* mode (nullptr);
+
+ // If the mode is specified, then check that it hasn't been specified yet
+ // and set it, if that's the case, or fail otherwise.
+ //
+ auto verify = [&mode] (const char* m, bool specified)
+ {
+ if (specified)
+ {
+ if (mode == nullptr)
+ mode = m;
+ else
+ fail << "both " << mode << " and " << m << " specified";
+ }
+ };
+
+ verify ("--dangling", o.dangling ());
+ verify ("--name", o.name_specified ());
+ verify ("--id", o.id_specified ());
+ verify ("--uuid", o.uuid_specified ());
+ verify ("directory argument", args.more ());
+
+ if (mode == nullptr)
+ fail << "expected configuration to unlink or --dangling option" <<
+ info << "run 'bpkg help cfg-unlink' for more information";
+
+ return o.dangling ()
+ ? cfg_unlink_dangling (o, args)
+ : cfg_unlink_config (o, args);
+ }
+}
diff --git a/bpkg/cfg-unlink.hxx b/bpkg/cfg-unlink.hxx
new file mode 100644
index 0000000..50256f3
--- /dev/null
+++ b/bpkg/cfg-unlink.hxx
@@ -0,0 +1,18 @@
+// file : bpkg/cfg-unlink.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_CFG_UNLINK_HXX
+#define BPKG_CFG_UNLINK_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/cfg-unlink-options.hxx>
+
+namespace bpkg
+{
+ int
+ cfg_unlink (const cfg_unlink_options&, cli::scanner& args);
+}
+
+#endif // BPKG_CFG_UNLINK_HXX
diff --git a/bpkg/checksum.cxx b/bpkg/checksum.cxx
index 65ed377..b761d0b 100644
--- a/bpkg/checksum.cxx
+++ b/bpkg/checksum.cxx
@@ -331,7 +331,7 @@ namespace bpkg
}
string
- sha256 (const common_options& o, const path& f)
+ sha256sum (const common_options& o, const path& f)
{
if (!exists (f))
fail << "file " << f << " does not exist";
diff --git a/bpkg/checksum.hxx b/bpkg/checksum.hxx
index fdf6fe5..54e5b3c 100644
--- a/bpkg/checksum.hxx
+++ b/bpkg/checksum.hxx
@@ -4,8 +4,6 @@
#ifndef BPKG_CHECKSUM_HXX
#define BPKG_CHECKSUM_HXX
-#include <libbutl/sha256.mxx>
-
#include <bpkg/types.hxx>
#include <bpkg/utility.hxx>
@@ -16,7 +14,7 @@ namespace bpkg
// Calculate SHA256 sum of the specified memory buffer in binary mode.
//
inline string
- sha256 (const char* buf, size_t n) {return butl::sha256 (buf, n).string ();}
+ sha256sum (const char* buf, size_t n) {return sha256 (buf, n).string ();}
// The same but for a file. Issue diagnostics and throw failed if anything
// goes wrong.
@@ -26,7 +24,7 @@ namespace bpkg
// optimized for the platform.
//
string
- sha256 (const common_options&, const path& file);
+ sha256sum (const common_options&, const path& file);
}
#endif // BPKG_CHECKSUM_HXX
diff --git a/bpkg/common.cli b/bpkg/common.cli
index 49a7788..c7d28bc 100644
--- a/bpkg/common.cli
+++ b/bpkg/common.cli
@@ -80,6 +80,14 @@ namespace bpkg
\li|Even more detailed information.||"
}
+ bpkg::stdout_format --stdout-format = bpkg::stdout_format::lines
+ {
+ "<format>",
+ "Representation format to use for printing to \cb{stdout}. Valid values
+ for this option are \cb{lines} (default) and \cb{json}. See the JSON
+ OUTPUT section below for details on the \cb{json} format."
+ }
+
size_t --jobs|-j
{
"<num>",
@@ -90,32 +98,63 @@ namespace bpkg
\cb{test}, etc."
}
- // In the future we may also have --structured-result, similar to the
- // build system.
- //
bool --no-result
{
"Don't print informational messages about the outcome of performing
- a command."
+ a command or some of its parts. Note that if this option is specified,
+ then for certain long-running command parts progress is displayed
+ instead, unless suppressed with \cb{--no-progress}."
+ }
+
+ string --structured-result
+ {
+ "<fmt>",
+ "Write the result of performing a command in a structured form. In
+ this mode, instead of printing to \cb{stderr} informational messages
+ about the outcome of performing a command or some of its parts,
+ \cb{bpkg} writes to \cb{stdout} a machine-readable result description
+ in the specified format. Not all commands support producing structured
+ result and valid <fmt> values are command-specific. Consult the command
+ documentation for details."
}
// When it comes to external programs (such as curl, git, etc), if stderr
// is not a terminal, the logic is actually tri-state: With --no-progress
- // we suppress any progress. With --progress (which we may add in the
- // future), we request full progress. Finally, without any --*progress
- // options we let the external program decide what to do: it may do
- // something intelligent (like curl) and produce non-terminal-friendly
- // progress (such as status lines printed periodically) or it may disable
- // progress all together (like git). Of course, it may also do no
- // detection and dump non-terminal-unfriendly progress in which case we
- // should probably do the detection ourselves and suppress it.
+ // we suppress any progress. With --progress we request full progress.
+ // Finally, without any --*progress options we let the external program
+ // decide what to do: it may do something intelligent (like curl) and
+ // produce non-terminal-friendly progress (such as status lines printed
+ // periodically) or it may disable progress all together (like git). Of
+ // course, it may also do no detection and dump non-terminal-unfriendly
+ // progress in which case we should probably do the detection ourselves
+ // and suppress it.
//
+ bool --progress
+ {
+ "Display progress indicators for long-lasting operations, such as
+ network transfers, building, etc. If printing to a terminal the
+ progress is displayed by default for low verbosity levels. Use
+ \cb{--no-progress} to suppress."
+ }
+
bool --no-progress
{
"Suppress progress indicators for long-lasting operations, such as
network transfers, building, etc."
}
+ bool --diag-color
+ {
+ "Use color in diagnostics. If printing to a terminal the color is used
+ by default provided the terminal is not dumb. Use \cb{--no-diag-color}
+ to suppress."
+ }
+
+ bool --no-diag-color
+ {
+ "Don't use color in diagnostics."
+ }
+
path --build
{
"<path>",
@@ -149,8 +188,7 @@ namespace bpkg
If the fetch program is not specified, then \cb{bpkg} will try to
discover if one of the above programs is available and use that.
- Currently, \cb{bpkg} has the following preference order: \cb{wget}
- 1.16 or higher (supports \cb{--show-progress}), \cb{curl},
+ Currently, \cb{bpkg} has the following preference order: \cb{curl},
\cb{wget}, and \cb{fetch}."
}
@@ -162,6 +200,13 @@ namespace bpkg
specify multiple fetch options."
}
+ // Undocumented equivalents to bdep's --curl* options. We "merge" them
+ // into --fetch/--fetch-option in an ad hoc manner (see fetch.cxx for
+ // details).
+ //
+ path --curl;
+ strings --curl-option;
+
size_t --fetch-timeout
{
"<sec>",
@@ -277,13 +322,17 @@ namespace bpkg
only applicable to the specific command, for example:
\
- bpkg rep-create \
- --openssl rsautl:/path/to/openssl \
- --openssl-option rsautl:-engine \
- --openssl-option rsautl:pkcs11 \
+ bpkg rep-create \
+ --openssl pkeyutl:/path/to/openssl \
+ --openssl-option pkeyutl:-engine \
+ --openssl-option pkeyutl:pkcs11 \
...
\
+ Note that for \cb{openssl} versions prior to \cb{3.0.0} \cb{bpkg} uses
+ the \cb{rsautl} command instead of \cb{pkeyutl} for the data signing
+ and recovery operations.
+
An unqualified value that contains a colon can be specified as
qualified with an empty command, for example, \cb{--openssl
:C:\\bin\\openssl}. To see openssl commands executed by \cb{bpkg}, use
@@ -341,6 +390,24 @@ namespace bpkg
"Assume the answer to all authentication prompts is \cb{no}."
}
+ git_capabilities_map --git-capabilities
+ {
+ "<up>=<pc>",
+ "Protocol capabilities (<pc>) for a \cb{git} repository URL prefix
+ (<up>). Valid values for the capabilities are \cb{dumb} (no shallow
+ clone support), \cb{smart} (support for shallow clone, but not for
+ fetching unadvertised commits), \cb{unadv} (support for shallow clone
+ and for fetching unadvertised commits). For example:
+
+ \
+ bpkg build https://example.org/foo.git#master \
+ --git-capabilities https://example.org=smart
+ \
+
+ See \l{bpkg-repository-types(1)} for details on the \cb{git} protocol
+ capabilities."
+ }
+
string --pager // String to allow empty value.
{
"<path>",
@@ -394,5 +461,80 @@ namespace bpkg
{
"Don't load default options files."
}
+
+ bool --keep-tmp
+ {
+ "Don't remove \cb{bpkg}'s temporary directory at the end of the
+ command execution and print its path at the verbosity level 2 or
+ higher. This option is primarily useful for troubleshooting."
+ }
};
+
+ {
+ "",
+ "
+ \h|JSON OUTPUT|
+
+ Commands that support the JSON output specify their formats as a
+ serialized representation of a C++ \cb{struct} or an array thereof. For
+ example:
+
+ \
+ struct package
+ {
+ string name;
+ };
+
+ struct configuration
+ {
+ uint64_t id;
+ string path;
+ optional<string> name;
+ bool default;
+ vector<package> packages;
+ };
+ \
+
+ An example of the serialized JSON representation of \cb{struct}
+ \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\",
+ \"name\": \"gcc\",
+ \"default\": true,
+ \"packages\": [
+ {
+ \"name\": \"hello\"
+ }
+ ]
+ }
+ \
+
+ This section provides details on the overall properties of such formats
+ and the semantics of the \cb{struct} serialization.
+
+ The order of members in a JSON object is fixed as specified in the
+ corresponding \cb{struct}. While new members may be added in the
+ future (and should be ignored by older consumers), the semantics of the
+ existing members (including whether the top-level entry is an object or
+ array) may not change.
+
+ An object member is required unless its type is \cb{optional<>},
+ \cb{bool}, or \cb{vector<>} (array). For \cb{bool} members absent means
+ \cb{false}. For \cb{vector<>} members absent means empty. An empty
+ top-level array is always present.
+
+ For example, the following JSON text is a possible serialization of
+ the above \cb{struct} \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\"
+ }
+ \
+ "
+ }
}
diff --git a/bpkg/database.cxx b/bpkg/database.cxx
index a866274..65e3af8 100644
--- a/bpkg/database.cxx
+++ b/bpkg/database.cxx
@@ -3,140 +3,1128 @@
#include <bpkg/database.hxx>
+#include <sqlite3.h> // @@ TMP sqlite3_libversion_number()
+
+#include <map>
+
#include <odb/schema-catalog.hxx>
#include <odb/sqlite/exceptions.hxx>
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/diagnostics.hxx>
-#include <bpkg/system-repository.hxx>
using namespace std;
namespace bpkg
{
- using namespace odb::sqlite;
- using odb::schema_catalog;
+ namespace sqlite = odb::sqlite;
- // Use a custom connection factory to automatically set and clear the
- // BPKG_OPEN_CONFIG environment variable. A bit heavy-weight but seems like
- // the best option.
+ // Configuration types.
//
- static const string open_name ("BPKG_OPEN_CONFIG");
+ const string host_config_type ("host");
+ const string build2_config_type ("build2");
- class conn_factory: public single_connection_factory // No need for pool.
+ const string&
+ buildtime_dependency_type (const package_name& nm)
{
- public:
- conn_factory (const dir_path& d)
- {
- setenv (open_name, normalize (d, "configuration").string ());
- }
+ return build2_module (nm) ? build2_config_type : host_config_type;
+ }
- virtual
- ~conn_factory ()
+ // Configuration names.
+ //
+ void
+ validate_configuration_name (const string& s, const char* what)
+ {
+ if (s.empty ())
+ fail << "empty " << what;
+
+ if (!(alpha (s[0]) || s[0] == '_'))
+ fail << "invalid " << what << " '" << s << "': illegal first character "
+ << "(must be alphabetic or underscore)";
+
+ for (auto i (s.cbegin () + 1), e (s.cend ()); i != e; ++i)
{
- unsetenv (open_name);
+ char c (*i);
+
+ if (!(alnum (c) || c == '_' || c == '-'))
+ fail << "invalid " << what << " '" << s << "': illegal character "
+ << "(must be alphabetic, digit, underscore, or dash)";
}
- };
+ }
// Register the data migration functions.
//
- // NOTE: remember to qualify table names if using native statements.
+ // NOTE: remember to qualify table/index names with \"main\". if using
+ // native statements.
//
template <odb::schema_version v>
using migration_entry = odb::data_migration_entry<v, DB_SCHEMA_VERSION_BASE>;
- static const migration_entry<8>
- migrate_v8 ([] (odb::database& db)
+ // @@ Since there is no proper support for dropping table columns not in
+ // SQLite prior to 3.35.5 nor in ODB, we will drop the
+ // available_package_dependency_alternatives.dep_* columns manually. We,
+ // however, cannot do it here since ODB will try to set the dropped
+ // column values to NULL at the end of migration. Thus, we will do it
+ // ad hoc after the below schema_catalog::migrate() call.
+ //
+ // NOTE: remove the mentioned ad hoc migration when removing this
+ // function.
+ //
+ static const migration_entry<13>
+ migrate_v13 ([] (odb::database& db)
{
- for (shared_ptr<repository> r: pointer_result (db.query<repository> ()))
- {
- if (!r->name.empty ()) // Non-root repository?
- {
- r->local = r->location.local ();
- db.update (r);
- }
- }
+ // Note that
+ // available_package_dependency_alternative_dependencies.alternative_index
+ // is copied from available_package_dependency_alternatives.index and
+ // available_package_dependency_alternative_dependencies.index is set to 0.
+ //
+ db.execute (
+ "INSERT INTO \"main\".\"available_package_dependency_alternative_dependencies\" "
+ "(\"name\", "
+ "\"version_epoch\", "
+ "\"version_canonical_upstream\", "
+ "\"version_canonical_release\", "
+ "\"version_revision\", "
+ "\"version_iteration\", "
+ "\"dependency_index\", "
+ "\"alternative_index\", "
+ "\"index\", "
+ "\"dep_name\", "
+ "\"dep_min_version_epoch\", "
+ "\"dep_min_version_canonical_upstream\", "
+ "\"dep_min_version_canonical_release\", "
+ "\"dep_min_version_revision\", "
+ "\"dep_min_version_iteration\", "
+ "\"dep_min_version_upstream\", "
+ "\"dep_min_version_release\", "
+ "\"dep_max_version_epoch\", "
+ "\"dep_max_version_canonical_upstream\", "
+ "\"dep_max_version_canonical_release\", "
+ "\"dep_max_version_revision\", "
+ "\"dep_max_version_iteration\", "
+ "\"dep_max_version_upstream\", "
+ "\"dep_max_version_release\", "
+ "\"dep_min_open\", "
+ "\"dep_max_open\") "
+ "SELECT "
+ "\"name\", "
+ "\"version_epoch\", "
+ "\"version_canonical_upstream\", "
+ "\"version_canonical_release\", "
+ "\"version_revision\", "
+ "\"version_iteration\", "
+ "\"dependency_index\", "
+ "\"index\", "
+ "0, "
+ "\"dep_name\", "
+ "\"dep_min_version_epoch\", "
+ "\"dep_min_version_canonical_upstream\", "
+ "\"dep_min_version_canonical_release\", "
+ "\"dep_min_version_revision\", "
+ "\"dep_min_version_iteration\", "
+ "\"dep_min_version_upstream\", "
+ "\"dep_min_version_release\", "
+ "\"dep_max_version_epoch\", "
+ "\"dep_max_version_canonical_upstream\", "
+ "\"dep_max_version_canonical_release\", "
+ "\"dep_max_version_revision\", "
+ "\"dep_max_version_iteration\", "
+ "\"dep_max_version_upstream\", "
+ "\"dep_max_version_release\", "
+ "\"dep_min_open\", "
+ "\"dep_max_open\" "
+ "FROM \"main\".\"available_package_dependency_alternatives\"");
});
- database
- open (const dir_path& d, tracer& tr, bool create)
+ // @@ Since there is no proper support for dropping table columns not in
+ // SQLite prior to 3.35.5 nor in ODB, we will drop the
+ // available_package_dependencies.conditional column manually. We,
+ // however, cannot do it here since ODB will try to set the dropped
+ // column values to NULL at the end of migration. Thus, we will do it
+ // ad hoc after the below schema_catalog::migrate() call.
+ //
+ // NOTE: remove the mentioned ad hoc migration when removing this
+ // function.
+ //
+ static const migration_entry<14>
+ migrate_v14 ([] (odb::database&)
{
- tracer trace ("open");
+ });
+ static inline path
+ cfg_path (const dir_path& d, bool create)
+ {
path f (d / bpkg_dir / "bpkg.sqlite3");
if (!create && !exists (f))
fail << d << " does not look like a bpkg configuration directory";
+ return f;
+ }
+
+ // The BPKG_OPEN_CONFIGS environment variable.
+ //
+ // Automatically set it to the configuration directory path and clear in the
+ // main database constructor and destructor, respectively. Also append the
+ // attached database configuration paths in their constructors and clear
+ // them in detach_all(). The paths are absolute, normalized, double-quoted,
+ // and separated with spaces.
+ //
+ static const string open_name ("BPKG_OPEN_CONFIGS");
+
+ struct database::impl
+ {
+ sqlite::connection_ptr conn; // Main connection.
+
+ map<dir_path, database> attached_map;
+
+ impl (sqlite::connection_ptr&& c): conn (move (c)) {}
+ };
+
+ database::
+ database (const dir_path& d,
+ configuration* create,
+ odb::tracer& tr,
+ bool pre_attach,
+ bool sys_rep,
+ const dir_paths& pre_link,
+ std::string str_repr)
+ : sqlite::database (
+ cfg_path (d, create != nullptr).string (),
+ SQLITE_OPEN_READWRITE | (create != nullptr ? SQLITE_OPEN_CREATE : 0),
+ true, // Enable FKs.
+ "", // Default VFS.
+ unique_ptr<sqlite::connection_factory> (
+ new sqlite::serial_connection_factory)), // Single connection.
+ config (normalize (d, "configuration")),
+ config_orig (d),
+ string (move (str_repr))
+ {
+ bpkg::tracer trace ("database");
+
+ // Cache the (single) main connection we will be using.
+ //
+ unique_ptr<impl> ig ((impl_ = new impl (connection ())));
+
try
{
- database db (f.string (),
- SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0),
- true, // Enable FKs.
- "", // Default VFS.
- unique_ptr<connection_factory> (new conn_factory (d)));
+ tracer_guard tg (*this, trace);
- db.tracer (trace);
-
- // Lock the database for as long as the connection is active. First
- // we set locking_mode to EXCLUSIVE which instructs SQLite not to
- // release any locks until the connection is closed. Then we force
- // SQLite to acquire the write lock by starting exclusive transaction.
- // See the locking_mode pragma documentation for details. This will
- // also fail if the database is inaccessible (e.g., file does not
- // exist, already used by another process, etc).
+ // Lock the database for as long as the connection is active. First we
+ // set locking_mode to EXCLUSIVE which instructs SQLite not to release
+ // any locks until the connection is closed. Then we force SQLite to
+ // acquire the write lock by starting exclusive transaction. See the
+ // locking_mode pragma documentation for details. This will also fail if
+ // the database is inaccessible (e.g., file does not exist, already used
+ // by another process, etc).
+ //
+ // Note that here we assume that any database that is ATTACHED within an
+ // exclusive transaction gets the same treatment.
//
- using odb::sqlite::transaction; // Skip the wrapper.
+ using odb::schema_catalog;
+
+ impl_->conn->execute ("PRAGMA locking_mode = EXCLUSIVE");
+
+ add_env (true /* reset */);
+ auto g (make_exception_guard ([] () {unsetenv (open_name);}));
- try
{
- db.connection ()->execute ("PRAGMA locking_mode = EXCLUSIVE");
- transaction t (db.begin_exclusive ());
+ sqlite::transaction t (impl_->conn->begin_exclusive ());
- if (create)
+ if (create != nullptr)
{
- // Create the new schema.
+ // Create the new schema and persist the self-link.
//
- if (db.schema_version () != 0)
- fail << f << ": already has database schema";
+ if (schema_version () != 0)
+ fail << sqlite::database::name () << ": already has database "
+ << "schema";
- schema_catalog::create_schema (db);
+ schema_catalog::create_schema (*this);
+
+ // To speed up the query_dependents() function create the multi-
+ // column index for the configuration and prerequisite columns of
+ // the selected_package_prerequisites table.
+ //
+ // @@ Use ODB pragma if/when support for container indexes is added.
+ //
+ execute (
+ "CREATE INDEX "
+ "selected_package_prerequisites_configuration_prerequisite_i "
+ "ON selected_package_prerequisites (configuration, "
+ "prerequisite)");
+
+ persist (*create); // Also assigns link id.
+
+ // Cache the configuration information.
+ //
+ cache_config (create->uuid, create->name, create->type);
}
else
{
- // Migrate the database if necessary.
+ // Migrate the linked databases cluster.
+ //
+ migrate ();
+
+ // Cache the configuration information.
//
- schema_catalog::migrate (db);
+ shared_ptr<configuration> c (load<configuration> (0));
+ cache_config (c->uuid, move (c->name), move (c->type));
+
+ // Load the system repository, if requested.
+ //
+ if (sys_rep)
+ load_system_repository ();
}
+ // Migrate the pre-linked databases and the database clusters they
+ // belong to.
+ //
+ for (const dir_path& d: pre_link)
+ attach (d).migrate ();
+
t.commit ();
}
+
+ // Detach the (pre-)linked databases that were potentially attached
+ // during migration.
+ //
+ detach_all ();
+
+ if (pre_attach)
+ {
+ sqlite::transaction t (begin_exclusive ());
+ attach_explicit (sys_rep);
+ t.commit ();
+ }
+ }
+ catch (odb::timeout&)
+ {
+ fail << "configuration " << d << " is already used by another process";
+ }
+ catch (const sqlite::database_exception& e)
+ {
+ fail << sqlite::database::name () << ": " << e.message ();
+ }
+
+ tracer (tr);
+
+ // Note: will be leaked if anything further throws.
+ //
+ ig.release ();
+ }
+
+ // NOTE: if we ever load/persist any dynamically allocated objects in this
+ // constructor, make sure such objects do not use the session or the session
+ // is temporarily suspended in the attach() function (see its implementation
+ // for the reasoning note) since the database will be moved.
+ //
+ database::
+ database (impl* i,
+ const dir_path& d,
+ std::string schema,
+ bool sys_rep)
+ : sqlite::database (i->conn,
+ cfg_path (d, false /* create */).string (),
+ move (schema)),
+ config (d),
+ impl_ (i)
+ {
+ bpkg::tracer trace ("database");
+
+ // Derive the configuration original directory path.
+ //
+ database& mdb (main_database ());
+
+ if (mdb.config_orig.relative ())
+ {
+ // Fallback to absolute path if the configuration is on a different
+ // drive on Windows.
+ //
+ if (optional<dir_path> c = config.try_relative (current_directory ()))
+ config_orig = move (*c);
+ else
+ config_orig = config;
+ }
+ else
+ config_orig = config;
+
+ string = '[' + config_orig.representation () + ']';
+
+ try
+ {
+ tracer_guard tg (*this, trace);
+
+ // Cache the configuration information.
+ //
+ shared_ptr<configuration> c (load<configuration> (0));
+ cache_config (c->uuid, move (c->name), move (c->type));
+
+ // Load the system repository, if requested.
+ //
+ if (sys_rep)
+ load_system_repository ();
+ }
+ catch (const sqlite::database_exception& e)
+ {
+ fail << sqlite::database::name () << ": " << e.message ();
+ }
+
+ add_env ();
+
+ // Set the tracer used by the linked configurations cluster.
+ //
+ sqlite::database::tracer (mdb.tracer ());
+ }
+
+ database::
+ ~database ()
+ {
+ if (impl_ != nullptr && // Not a moved-from database?
+ main ())
+ {
+ delete impl_;
+
+ unsetenv (open_name);
+ }
+ }
+
+ database::
+ database (database&& db)
+ : sqlite::database (move (db)),
+ uuid (db.uuid),
+ name (move (db.name)),
+ type (move (db.type)),
+ config (move (db.config)),
+ config_orig (move (db.config_orig)),
+ string (move (db.string)),
+ system_repository (move (db.system_repository)),
+ impl_ (db.impl_),
+ explicit_links_ (move (db.explicit_links_)),
+ implicit_links_ (move (db.implicit_links_))
+ {
+ db.impl_ = nullptr; // See ~database().
+ }
+
+ void database::
+ add_env (bool reset) const
+ {
+ using std::string;
+
+ string v;
+
+ if (!reset)
+ {
+ if (optional<string> e = getenv (open_name))
+ v = move (*e);
+ }
+
+ v += (v.empty () ? "\"" : " \"") + config.string () + '"';
+
+ setenv (open_name, v);
+ }
+
+ void database::
+ tracer (tracer_type* t)
+ {
+ main_database ().sqlite::database::tracer (t);
+
+ for (auto& db: impl_->attached_map)
+ db.second.sqlite::database::tracer (t);
+ }
+
+ void database::
+ migrate ()
+ {
+ using odb::schema_catalog;
+
+ odb::schema_version sv (schema_version ());
+ odb::schema_version scv (schema_catalog::current_version (*this));
+
+ if (sv != scv)
+ {
+ if (sv < schema_catalog::base_version (*this))
+ fail << "configuration " << config_orig << " is too old";
+
+ if (sv > scv)
+ fail << "configuration " << config_orig << " is too new";
+
+ // Note that we need to migrate the current database before the linked
+ // ones to properly handle link cycles.
+ //
+ schema_catalog::migrate (*this);
+
+ // Note that the potential data corruption with `DROP COLUMN` is fixed
+ // in 3.35.5.
+ //
+ // @@ TMP Get rid of manual column dropping when ODB starts supporting
+ // that properly. Not doing so will result in failure of the below
+ // queries.
+ //
+ if (sqlite3_libversion_number () >= 3035005)
+ {
+ auto drop = [this] (const char* table, const char* column)
+ {
+ execute (std::string ("ALTER TABLE \"main\".") + table +
+ " DROP COLUMN \"" + column + '"');
+ };
+
+ // @@ TMP See migrate_v13() for details.
+ //
+ if (sv < 13)
+ {
+ const char* cs[] = {"dep_name",
+ "dep_min_version_epoch",
+ "dep_min_version_canonical_upstream",
+ "dep_min_version_canonical_release",
+ "dep_min_version_revision",
+ "dep_min_version_iteration",
+ "dep_min_version_upstream",
+ "dep_min_version_release",
+ "dep_max_version_epoch",
+ "dep_max_version_canonical_upstream",
+ "dep_max_version_canonical_release",
+ "dep_max_version_revision",
+ "dep_max_version_iteration",
+ "dep_max_version_upstream",
+ "dep_max_version_release",
+ "dep_min_open",
+ "dep_max_open",
+ nullptr};
+
+ for (const char** c (cs); *c != nullptr; ++c)
+ drop ("available_package_dependency_alternatives", *c);
+ }
+
+ // @@ TMP See migrate_v14() for details.
+ //
+ if (sv < 14)
+ drop ("available_package_dependencies", "conditional");
+ }
+
+ for (auto& c: query<configuration> (odb::query<configuration>::id != 0))
+ {
+ dir_path d (c.effective_path (config));
+
+ // Remove the dangling implicit link.
+ //
+ if (!c.expl && !exists (d))
+ {
+ warn << "implicit link " << c.path << " of configuration "
+ << config_orig << " no longer exists, removing";
+
+ erase (c);
+ continue;
+ }
+
+ attach (d).migrate ();
+ }
+ }
+ }
+
+ void database::
+ cache_config (const uuid_type& u, optional<std::string> n, std::string t)
+ {
+ // NOTE: remember to update database(database&&) if changing anything
+ // here.
+ //
+ uuid = u;
+ name = move (n);
+ type = move (t);
+ }
+
+ void database::
+ load_system_repository ()
+ {
+ assert (!system_repository); // Must only be loaded once.
+
+ system_repository = bpkg::system_repository ();
+
+ // Query for all the packages with the system substate and enter their
+ // versions into system_repository as non-authoritative. This way an
+ // available_package (e.g., a stub) will automatically "see" system
+ // version, if one is known.
+ //
+ assert (transaction::has_current ());
+
+ for (const auto& p: query<selected_package> (
+ odb::query<selected_package>::substate == "system"))
+ system_repository->insert (p.name,
+ p.version,
+ false /* authoritative */);
+ }
+
+ database& database::
+ attach (const dir_path& d, bool sys_rep)
+ {
+ assert (d.absolute () && d.normalized ());
+
+ // Check if we are trying to attach the main database.
+ //
+ database& md (main_database ());
+ if (d == md.config)
+ return md;
+
+ auto& am (impl_->attached_map);
+
+ auto i (am.find (d));
+
+ if (i == am.end ())
+ {
+ // We know from the implementation that 4-character schema names are
+ // optimal. So try to come up with a unique abbreviated hash that is 4
+ // or more characters long.
+ //
+ std::string schema;
+ {
+ sha256 h (d.string ());
+
+ for (size_t n (4);; ++n)
+ {
+ schema = h.abbreviated_string (n);
+
+ if (find_if (am.begin (), am.end (),
+ [&schema] (const map<dir_path, database>::value_type& v)
+ {
+ return v.second.schema () == schema;
+ }) == am.end ())
+ break;
+ }
+ }
+
+ // If attaching out of an exclusive transaction (all our transactions
+ // are exclusive), start one to force database locking (see the above
+ // locking_mode discussion for details).
+ //
+ sqlite::transaction t;
+ if (!sqlite::transaction::has_current ())
+ t.reset (begin_exclusive ());
+
+ try
+ {
+ // NOTE: we need to be careful here not to bind any persistent objects
+ // the database constructor may load/persist to the temporary database
+ // object in the session cache.
+ //
+ i = am.insert (
+ make_pair (d, database (impl_, d, move (schema), sys_rep))).first;
+ }
catch (odb::timeout&)
{
fail << "configuration " << d << " is already used by another process";
}
- // Query for all the packages with the system substate and enter their
- // versions into system_repository as non-authoritative. This way an
- // available_package (e.g., a stub) will automatically "see" system
- // version, if one is known.
+ if (!t.finalized ())
+ t.commit ();
+ }
+
+ return i->second;
+ }
+
+ void database::
+ detach_all ()
+ {
+ assert (main ());
+
+ explicit_links_.clear ();
+ implicit_links_.clear ();
+
+ for (auto i (impl_->attached_map.begin ());
+ i != impl_->attached_map.end (); )
+ {
+ i->second.detach ();
+ i = impl_->attached_map.erase (i);
+ }
+
+ // Remove the detached databases from the environment.
+ //
+ add_env (true /* reset */);
+ }
+
+ void database::
+ verify_link (const configuration& lc, database& ldb)
+ {
+ const dir_path& c (ldb.config_orig);
+
+ if (lc.uuid != ldb.uuid)
+ fail << "configuration " << c << " uuid mismatch" <<
+ info << "uuid " << ldb.uuid <<
+ info << (!lc.expl ? "implicitly " : "") << "linked with "
+ << config_orig << " as " << lc.uuid;
+
+ if (lc.type != ldb.type)
+ fail << "configuration " << c << " type mismatch" <<
+ info << "type " << ldb.type <<
+ info << (!lc.expl ? "implicitly " : "") << "linked with "
+ << config_orig << " as " << lc.type;
+
+ if (lc.effective_path (config) != ldb.config)
+ fail << "configuration " << c << " path mismatch" <<
+ info << (!lc.expl ? "implicitly " : "") << "linked with "
+ << config_orig << " as " << lc.path;
+ }
+
+ void database::
+ attach_explicit (bool sys_rep)
+ {
+ assert (transaction::has_current ());
+
+ if (explicit_links_.empty ())
+ {
+ // Note that the self-link is implicit.
+ //
+ explicit_links_.push_back (linked_config {0, name, *this});
+
+ for (auto& lc: query<configuration> (odb::query<configuration>::expl))
+ {
+ database& db (attach (lc.effective_path (config), sys_rep));
+ verify_link (lc, db);
+
+ explicit_links_.push_back (linked_config {*lc.id, move (lc.name), db});
+ db.attach_explicit (sys_rep);
+ }
+ }
+ }
+
+ linked_databases& database::
+ implicit_links (bool attach_, bool sys_rep)
+ {
+ assert (transaction::has_current ());
+
+ // Note that cached implicit links must at least contain the self-link,
+ // if the databases are already attached and cached.
+ //
+ if (implicit_links_.empty () && attach_)
+ {
+ implicit_links_.push_back (*this);
+
+ using q = odb::query<configuration>;
+
+ for (const auto& lc: query<configuration> (q::id != 0))
+ {
+ dir_path d (lc.effective_path (config));
+
+ // Skip the dangling implicit link.
+ //
+ if (!lc.expl && !exists (d))
+ {
+ if (verb > 1)
+ info << "skipping dangling implicit backlink " << lc.path <<
+ info << "use 'cfg-unlink --dangling' to clean up";
+
+ continue;
+ }
+
+ database& db (attach (d, sys_rep));
+
+ // Verify the link integrity.
+ //
+ verify_link (lc, db);
+
+ // If the link is explicit, then check whether it is also implicit (see
+ // cfg_link() for details) and skip if it is not.
+ //
+ if (lc.expl)
+ {
+ shared_ptr<configuration> cf (backlink (db));
+
+ if (!cf->expl)
+ continue;
+ }
+
+ // If the explicitly linked databases are pre-attached, normally to
+ // make the selected packages loadable, then we also pre-attach
+ // explicit links of the database being attached implicitly, by the
+ // same reason. Indeed, think of loading the package dependent from
+ // the implicitly linked database as a selected package.
+ //
+ if (!explicit_links_.empty ())
+ db.attach_explicit (sys_rep);
+
+ implicit_links_.push_back (db);
+ }
+ }
+
+ return implicit_links_;
+ }
+
+ shared_ptr<configuration> database::
+ backlink (database& db)
+ {
+ using q = odb::query<configuration>;
+
+ shared_ptr<configuration> cf (
+ db.query_one<configuration> (q::uuid == uuid.string ()));
+
+ if (cf == nullptr)
+ fail << "configuration " << db.config_orig << " is linked with "
+ << config_orig << " but latter is not implicitly linked "
+ << "with former";
+
+ // While at it, verify the integrity of the other end of the link.
+ //
+ db.verify_link (*cf, *this);
+ return cf;
+ }
+
+ linked_databases database::
+ dependent_configs (bool sys_rep)
+ {
+ linked_databases r;
+
+ // Note that if this configuration is of a build-time dependency type
+ // (host or build2) we need to be careful during recursion and not
+ // cross the build-time dependency type boundary. So for example, for the
+ // following implicit links only cfg1, cfg2, and cfg3 configurations are
+ // included.
+ //
+ // cfg1 (this, host) -> cfg2 (host) -> cfg3 (build2) -> cfg4 (target)
+ //
+ // Add the linked database to the resulting list if it is of the linking
+ // database type (t) or this type (t) is of the expected build-time
+ // dependency type (bt).
+ //
+ auto add = [&r, sys_rep] (database& db,
+ const std::string& t,
+ const std::string& bt,
+ const auto& add)
+ {
+ if (!(db.type == t || t == bt) ||
+ std::find (r.begin (), r.end (), db) != r.end ())
+ return;
+
+ r.push_back (db);
+
+ const linked_databases& lds (db.implicit_links (true /* attach */,
+ sys_rep));
+
+ // New boundary type.
+ //
+ const std::string& nbt (db.type == bt ? bt : empty_string);
+
+ for (auto i (lds.begin_linked ()); i != lds.end (); ++i)
+ {
+ database& ldb (*i);
+ add (ldb, db.type, nbt, add);
+
+ // If this configuration is of the build2 type, then also add the
+ // private host configurations of its implicitly linked
+ // configurations.
+ //
+ if (db.type == build2_config_type)
+ {
+ if (database* hdb = ldb.private_config (host_config_type))
+ add (*hdb, db.type, nbt, add);
+ }
+ }
+ };
+
+ add (*this,
+ type,
+ (type == host_config_type || type == build2_config_type
+ ? type
+ : empty_string),
+ add);
+
+ return r;
+ }
+
+ linked_databases database::
+ dependency_configs (optional<bool> buildtime, const std::string& tp)
+ {
+ // The type only makes sense if build-time dependency configurations are
+ // requested.
+ //
+ if (buildtime)
+ assert (!*buildtime ||
+ tp == host_config_type ||
+ tp == build2_config_type);
+ else
+ assert (tp.empty ());
+
+ linked_databases r;
+
+ // Allow dependency configurations of the dependent configuration own type
+ // if all or runtime dependency configurations are requested.
+ //
+ bool allow_own_type (!buildtime || !*buildtime);
+
+ // Allow dependency configurations of the host type if all or regular
+ // build-time dependency configurations are requested.
+ //
+ bool allow_host_type (!buildtime ||
+ (*buildtime && tp == host_config_type));
+
+ // Allow dependency configurations of the build2 type if all or build2
+ // module dependency configurations are requested.
+ //
+ bool allow_build2_type (!buildtime ||
+ (*buildtime && tp == build2_config_type));
+
+ // Add the linked database to the resulting list if it is of the linking
+ // database type and allow_own_type is true, or it is of the host type and
+ // allow_host_type is true, or it is of the build2 type and
+ // allow_build2_type is true. Call itself recursively for the explicitly
+ // linked configurations.
+ //
+ // Note that the linked database of the linking database type is not added
+ // if allow_own_type is false, however its own linked databases of the
+ // host/build2 type are added, if allow_host_type/allow_build2_type is
+ // true.
+ //
+ linked_databases chain; // Note: we may not add but still descend.
+ auto add = [&r,
+ allow_own_type,
+ allow_host_type,
+ allow_build2_type,
+ &chain]
+ (database& db,
+ const std::string& t,
+ const auto& add)
+ {
+ if (std::find (r.begin (), r.end (), db) != r.end () ||
+ std::find (chain.begin (), chain.end (), db) != chain.end ())
+ return;
+
+ bool own (db.type == t);
+ bool host (db.type == host_config_type);
+ bool build2 (db.type == build2_config_type);
+
+ // Bail out if we are not allowed to descend.
+ //
+ if (!own && !(allow_host_type && host) && !(allow_build2_type && build2))
+ return;
+
+ // Add the database to the list, if allowed, and descend afterwards.
//
- transaction t (db.begin ());
+ if ((allow_own_type && own) ||
+ (allow_host_type && host) ||
+ (allow_build2_type && build2))
+ r.push_back (db);
+
+ chain.push_back (db);
+
+ {
+ const linked_configs& lcs (db.explicit_links ());
+ for (auto i (lcs.begin_linked ()); i != lcs.end (); ++i)
+ add (i->db, db.type, add);
+ }
+
+ // If this is a private host configuration, then also add the parent's
+ // explicitly linked configurations of the build2 type.
+ //
+ if (db.private_ () && db.type == host_config_type)
+ {
+ const linked_configs& lcs (db.parent_config ().explicit_links ());
+
+ for (auto i (lcs.begin_linked ()); i != lcs.end (); ++i)
+ {
+ database& ldb (i->db);
+ if (ldb.type == build2_config_type)
+ add (ldb, db.type, add);
+ }
+ }
+
+ chain.pop_back ();
+ };
+
+ add (*this, type, add);
+ return r;
+ }
+
+ linked_databases database::
+ dependency_configs (const package_name& n, bool buildtime)
+ {
+ return dependency_configs (buildtime,
+ (buildtime
+ ? buildtime_dependency_type (n)
+ : empty_string));
+ }
+
+ linked_databases database::
+ dependency_configs ()
+ {
+ return dependency_configs (nullopt /* buildtime */,
+ empty_string /* type */);
+ }
+
+ linked_databases database::
+ cluster_configs (bool sys_rep)
+ {
+ linked_databases r;
+
+ // If the database is not in the resulting list, then add it and its
+ // dependent and dependency configurations, recursively.
+ //
+ auto add = [&r, sys_rep] (database& db, const auto& add)
+ {
+ if (std::find (r.begin (), r.end (), db) != r.end ())
+ return;
+
+ r.push_back (db);
+
+ {
+ linked_databases cs (db.dependency_configs ());
+ for (auto i (cs.begin_linked ()); i != cs.end (); ++i)
+ add (*i, add);
+ }
+
+ {
+ linked_databases cs (db.dependent_configs (sys_rep));
+ for (auto i (cs.begin_linked ()); i != cs.end (); ++i)
+ add (*i, add);
+ }
+ };
+
+ add (*this, add);
+
+ return r;
+ }
+
+ database& database::
+ find_attached (uint64_t id, bool s)
+ {
+ assert (!explicit_links_.empty ());
+
+ // Note that there shouldn't be too many databases, so the linear search
+ // is OK.
+ //
+ auto r (find_if (explicit_links_.begin (), explicit_links_.end (),
+ [&id] (const linked_config& lc)
+ {
+ return lc.id == id;
+ }));
+
+ if (r == explicit_links_.end () || (!s && r == explicit_links_.begin ()))
+ fail << "no configuration with id " << id << " is linked with "
+ << config_orig;
+
+ return r->db;
+ }
+
+ database& database::
+ find_attached (const std::string& name, bool s)
+ {
+ assert (!explicit_links_.empty ());
- for (const auto& p:
- db.query<selected_package> (
- query<selected_package>::substate == "system"))
- system_repository.insert (p.name, p.version, false);
+ auto r (find_if (explicit_links_.begin (), explicit_links_.end (),
+ [&name] (const linked_config& lc)
+ {
+ return lc.name && *lc.name == name;
+ }));
+
+ if (r == explicit_links_.end () || (!s && r == explicit_links_.begin ()))
+ fail << "no configuration with name '" << name << "' is linked with "
+ << config_orig;
+
+ return r->db;
+ }
- t.commit ();
+ database& database::
+ find_attached (const uuid_type& uid, bool s)
+ {
+ assert (!explicit_links_.empty ());
+
+ auto r (find_if (explicit_links_.begin (), explicit_links_.end (),
+ [&uid] (const linked_config& lc)
+ {
+ return lc.db.get ().uuid == uid;
+ }));
+
+ if (r == explicit_links_.end () || (!s && r == explicit_links_.begin ()))
+ fail << "no configuration with uuid " << uid << " is linked with "
+ << config_orig;
+
+ return r->db;
+ }
- db.tracer (tr); // Switch to the caller's tracer.
- return db;
+ database& database::
+ find_attached (const dir_path& d, bool s)
+ {
+ assert (!explicit_links_.empty ());
+
+ auto r (find_if (explicit_links_.begin (), explicit_links_.end (),
+ [&d] (const linked_config& lc)
+ {
+ return lc.db.get ().config == d;
+ }));
+
+ if (r == explicit_links_.end () || (!s && r == explicit_links_.begin ()))
+ fail << "no configuration with path " << d << " is linked with "
+ << config_orig;
+
+ return r->db;
+ }
+
+ database* database::
+ try_find_dependency_config (const uuid_type& uid)
+ {
+ for (database& ldb: dependency_configs ())
+ {
+ if (uid == ldb.uuid)
+ return &ldb;
}
- catch (const database_exception& e)
+
+ return nullptr;
+ }
+
+ database& database::
+ find_dependency_config (const uuid_type& uid)
+ {
+ if (database* db = try_find_dependency_config (uid))
+ return *db;
+
+ fail << "no configuration with uuid " << uid << " is linked with "
+ << config_orig << endf;
+ }
+
+ database& database::
+ parent_config (bool sys_rep)
+ {
+ assert (private_ ());
+
+ dir_path pd (config.directory ().directory ()); // Parent configuration.
+ const linked_databases& lds (implicit_links (true /* attach */, sys_rep));
+
+ for (auto i (lds.begin_linked ()); i != lds.end (); ++i)
{
- fail << f << ": " << e.message () << endf;
+ if (i->get ().config == pd)
+ return *i;
}
+
+ // This should not happen normally and is likely to be the result of some
+ // bpkg misuse.
+ //
+ fail << "configuration " << pd << " is not linked to its private "
+ << "configuration " << config << endf;
+ }
+
+ database* database::
+ private_config (const std::string& type)
+ {
+ assert (!explicit_links_.empty ());
+
+ auto r (find_if (explicit_links_.begin_linked (), explicit_links_.end (),
+ [&type] (const linked_config& lc)
+ {
+ database& db (lc.db);
+ return db.private_ () && db.type == type;
+ }));
+
+ return r != explicit_links_.end () ? &r->db.get () : nullptr;
+ }
+
+ bool database::
+ main ()
+ {
+ return *this == main_database ();
+ }
+
+ // compare_lazy_ptr
+ //
+ bool compare_lazy_ptr::
+ less (const odb::database& x, const odb::database& y) const
+ {
+ return static_cast<const database&> (x) < static_cast<const database&> (y);
}
}
diff --git a/bpkg/database.hxx b/bpkg/database.hxx
index 42270d8..035ff60 100644
--- a/bpkg/database.hxx
+++ b/bpkg/database.hxx
@@ -16,48 +16,620 @@
#include <bpkg/utility.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/system-repository.hxx>
namespace bpkg
{
using odb::query;
+ using odb::prepared_query;
using odb::result;
using odb::session;
- using odb::sqlite::database;
+ class configuration;
+ class database;
+
+ struct linked_config
+ {
+ uint64_t id;
+ optional<string> name;
+ reference_wrapper<database> db; // Needs to be move-assignable.
+ };
+
+ // Used for the immediate explicit links which are normally not many (one
+ // entry for the self-link, which normally comes first).
+ //
+ class linked_configs: public small_vector<linked_config, 2>
+ {
+ public:
+ using base_type = small_vector<linked_config, 2>;
+
+ using base_type::base_type;
+
+ // Skip the self-link.
+ //
+ const_iterator
+ begin_linked () const
+ {
+ assert (!empty ());
+ return begin () + 1;
+ }
+
+ iterator
+ begin_linked ()
+ {
+ assert (!empty ());
+ return begin () + 1;
+ }
+ };
+
+ // In particular, is used for implicit links which can potentially be many
+ // (with the self-link which normally comes first). Think of a dependency in
+ // a shared configuration with dependents in multiple implicitly linked
+ // configurations.
+ //
+ class linked_databases: public small_vector<reference_wrapper<database>, 16>
+ {
+ public:
+ using base_type = small_vector<reference_wrapper<database>, 16>;
+
+ using base_type::base_type;
+
+ // Skip the self-link.
+ //
+ const_iterator
+ begin_linked () const
+ {
+ assert (!empty ());
+ return begin () + 1;
+ }
+
+ iterator
+ begin_linked ()
+ {
+ assert (!empty ());
+ return begin () + 1;
+ }
+ };
+
+ // Derive a custom database class that handles attaching/detaching
+ // additional configurations.
+ //
+ class database: public odb::sqlite::database
+ {
+ public:
+ using uuid_type = bpkg::uuid;
+
+ // Create new main database.
+ //
+ // The specified self-link object is persisted and its uuid and type are
+ // cached in the database object.
+ //
+ // If the pre-link list is not empty, then these configurations are
+ // treated as linked configurations for schema migration purposes. If
+ // specified, these paths should be absolute and normalized.
+ //
+ // Optionally, specify the database string representation for use in
+ // diagnostics.
+ //
+ database (const dir_path& cfg,
+ const shared_ptr<configuration>& self,
+ odb::tracer& tr,
+ const dir_paths& pre_link = dir_paths (),
+ std::string str_repr = "")
+ : database (cfg,
+ self.get (),
+ tr,
+ false,
+ false,
+ pre_link,
+ move (str_repr))
+ {
+ assert (self != nullptr);
+ }
+
+ // Open existing main database.
+ //
+ // If configured non-system selected packages can potentially be loaded
+ // from this database, then pass true as the pre_attach argument to
+ // recursively pre-attach the explicitly linked configuration databases,
+ // so that package prerequisites can be loaded from the linked
+ // configurations as well (see _selected_package_ref::to_ptr()
+ // implementation for details). Note that selected packages are loaded by
+ // some functions internally (package_iteration(), etc). Such functions
+ // are marked with the 'Note: loads selected packages.' note.
+ //
+ database (const dir_path& cfg,
+ odb::tracer& tr,
+ bool pre_attach,
+ bool sys_rep = false,
+ const dir_paths& pre_link = dir_paths (),
+ std::string str_repr = "")
+ : database (cfg,
+ nullptr,
+ tr,
+ pre_attach,
+ sys_rep,
+ pre_link,
+ move (str_repr))
+ {
+ }
+
+ ~database ();
+
+ // Move-constructible but not move-assignable.
+ //
+ // Note: noexcept is not specified since
+ // odb::sqlite::database(odb::sqlite::database&&) can throw.
+ //
+ database (database&&);
+ database& operator= (database&&) = delete;
+
+ database (const database&) = delete;
+ database& operator= (const database&) = delete;
+
+ // Attach another (existing) database. The configuration directory should
+ // be absolute and normalized.
+ //
+ // Note that if the database is already attached, then the existing
+ // instance reference is returned and the sys_rep argument is ignored.
+ //
+ database&
+ attach (const dir_path&, bool sys_rep = false);
+
+ // Attach databases of all the explicitly linked configurations,
+ // recursively. Must be called inside the transaction.
+ //
+ void
+ attach_explicit (bool sys_rep = false);
+
+ // Note that while attach*() can be called on the attached database,
+ // detach_all() should only be called on the main database.
+ //
+ void
+ detach_all ();
+
+ database&
+ main_database ()
+ {
+ return static_cast<database&> (odb::sqlite::database::main_database ());
+ }
+
+ // Return true if this is the main database.
+ //
+ bool
+ main ();
+
+ // Return the explicit links and the self-link (comes first) if the main
+ // database has been created with the pre_attach flag set to true and an
+ // empty list otherwise.
+ //
+ linked_configs&
+ explicit_links ()
+ {
+ return explicit_links_;
+ }
+
+ // By default attach and cache the implicitly linked configuration
+ // databases on the first call and return them along with the self-link
+ // (comes first), silently skipping the dangling links. If attach is
+ // false, then return an empty list if links were not yet cached by this
+ // function's previous call.
+ //
+ // Note that we skip dangling links without any warning since they can be
+ // quite common. Think of a shared host configuration with a bunch of
+ // implicitly linked configurations which are removed and potentially
+ // recreated later during the host configuration lifetime. Note however,
+ // that we remove the dangling implicit links during migration (see
+ // migrate() for details).
+ //
+ // Also note that for implicitly linked configurations the link
+ // information (id, etc) is useless, thus we only return the databases
+ // rather than the link information.
+ //
+ linked_databases&
+ implicit_links (bool attach = true, bool sys_rep = false);
+
+ // Return configurations of potential dependencies of packages selected in
+ // the current configuration.
+ //
+ // Specifically, return the self-link (comes first if included) and
+ // explicitly linked databases recursively, including them into the
+ // resulting list according to the following rules:
+ //
+ // - If dependency name and type are not specified, then return
+ // configurations of all dependencies (runtime and build-time). In this
+ // case include configurations of the linking configuration type and the
+ // host and build2 types and do not descend into links of different
+ // types.
+ //
+ // So, for example, for the following (not very sensible) link chain
+ // only the cfg1 and cfg2 configurations are included. The cfg3 type is
+ // not host and differs from type of cfg2 which links it and thus it is
+ // not included.
+ //
+ // cfg1 (this, target) -> cfg2 (host) -> cfg3 (target)
+ //
+ // - If buildtime is false, then return configurations of only runtime
+ // dependencies, regardless of the dependency name. In this case include
+ // configurations of only the linking configuration type and do not
+ // descend into links of different types.
+ //
+ // So for the above link chain only cfg1 configuration is included.
+ //
+ // - If buildtime is true, then return configurations of only build-time
+ // dependencies, suitable for building the specified dependency. In this
+ // case include configurations of only the build2 type for a build2
+ // module (named as libbuild2-*) and of the host type otherwise. Only
+ // descend into links of the same type and the appropriate dependency
+ // type (host or build2, depending on the dependency name).
+ //
+ // So for the above link chain only cfg2 configuration is included for a
+ // build-time dependency foo and none for libbuild2-foo.
+ //
+ // - While traversing through a private configuration of the host type
+ // consider the parent's explicitly linked configurations of the build2
+ // type as also being explicitly linked to this private
+ // configuration. Note that build system module dependencies of packages
+ // in private host configurations are resolved from the parent's
+ // explicitly linked configurations of the build2 type.
+ //
+ linked_databases
+ dependency_configs ();
+
+ linked_databases
+ dependency_configs (const package_name& dependency_name, bool buildtime);
+
+ // Return configurations of potential dependents of packages selected in
+ // the current configuration.
+ //
+ // Specifically, return the implicitly linked configuration databases
+ // recursively, including the self-link (comes first). Only include a
+ // linked configuration into the resulting list if it is of the same type
+ // as the linking configuration or the linking configuration is of the
+ // host or build2 type (think of searching through the target
+ // configurations for dependents of a build-time dependency in host
+ // configuration).
+ //
+ // While traversing through a configuration of the build2 type consider
+ // private host configurations of its implicitly linked configurations as
+ // also being implicitly linked to this build2 configuration. Note that
+ // build system module dependencies of packages in private host
+ // configurations are resolved from the parent's explicitly linked
+ // configurations of the build2 type.
+ //
+ linked_databases
+ dependent_configs (bool sys_rep = false);
+
+ // Return configurations of the linked cluster which the current
+ // configuration belongs to.
+ //
+ linked_databases
+ cluster_configs (bool sys_rep = false);
+
+ // The following find_*() functions assume that the main database has been
+ // created with the pre_attach flag set to true.
+ //
+
+ // The following find_attached() overloads include the self reference into
+ // the search by default and skip it if requested.
+ //
+
+ // Return the self reference if the id is 0. Otherwise, return the
+ // database of an explicitly linked configuration with the specified link
+ // id and issue diagnostics and fail if no link is found.
+ //
+ database&
+ find_attached (uint64_t id, bool self = true);
+
+ // Return the self reference if this is the current configuration
+ // name. Otherwise, return the database of an explicitly linked
+ // configuration with the specified name and issue diagnostics and fail if
+ // no link is found.
+ //
+ database&
+ find_attached (const std::string& name, bool self = true);
+
+ // Return the self reference if this is the current configuration
+ // uuid. Otherwise, return the database of an explicitly linked
+ // configuration with the specified uuid and issue diagnostics and fail if
+ // no link is found.
+ //
+ database&
+ find_attached (const uuid_type&, bool self = true);
+
+ // Return the self reference if this is the current configuration
+ // path. Otherwise, return the database of an explicitly linked
+ // configuration with the specified path and issue diagnostics and fail if
+ // no link is found. The configuration directory should be absolute and
+ // normalized.
+ //
+ database&
+ find_attached (const dir_path&, bool self = true);
+
+ // Return the dependency configuration with the specified uuid and issue
+ // diagnostics and fail if not found.
+ //
+ database&
+ find_dependency_config (const uuid_type&);
+
+ // As above but return NULL if not found, rather than failing.
+ //
+ database*
+ try_find_dependency_config (const uuid_type&);
+
+ // Return true if this configuration is private (i.e. its parent directory
+ // name is `.bpkg`).
+ //
+ bool
+ private_ ()
+ {
+ return config.directory ().leaf () == bpkg_dir;
+ }
+
+ // Return the implicitly linked configuration containing this
+ // configuration and issue diagnostics and fail if not found. Assume that
+ // this configuration is private.
+ //
+ database&
+ parent_config (bool sys_rep = false);
+
+ // Return a private configuration of the specified type, if present, and
+ // NULL otherwise.
+ //
+ database*
+ private_config (const string& type);
+
+ // Verify that the link information (uuid, type, etc) matches the linked
+ // configuration. Issue diagnostics and fail if that's not the case.
+ //
+ void
+ verify_link (const configuration&, database&);
+
+ // Assuming that the passed configuration is explicitly linked to the
+ // current one, return the corresponding backlink. Issue diagnostics and
+ // fail if the backlink is not found.
+ //
+ shared_ptr<configuration>
+ backlink (database&);
+
+ // Set the specified tracer for the whole linked databases cluster.
+ //
+ using tracer_type = odb::tracer;
+
+ void
+ tracer (tracer_type*);
+
+ void
+ tracer (tracer_type& t) {tracer (&t);}
+
+ using odb::sqlite::database::tracer;
+
+ public:
+ // Cached configuration information.
+ //
+ uuid_type uuid;
+ optional<std::string> name;
+ std::string type;
+
+ // Absolute and normalized configuration directory path. In particular, it
+ // is used as the configuration database identity.
+ //
+ dir_path config;
+
+ // For the main database, this is the original configuration directory
+ // path as specified by the user on the command line and `./` if
+ // unspecified. For other (linked) databases, it is the absolute
+ // configuration path if the main database's original configuration path
+ // is absolute and the path relative to the current directory otherwise.
+ // This is used in diagnostics.
+ //
+ dir_path config_orig;
+
+ // The database string representation for use in diagnostics.
+ //
+ // By default it is empty for the main database and the original
+ // configuration directory path in the `[<dir>]` form otherwise.
+ //
+ // NOTE: remember to update pkg_command_vars::string() and pkg-build.cxx
+ // if changing the format.
+ //
+ std::string string;
+
+ // Per-configuration system repository (only loaded if sys_rep constructor
+ // argument is true).
+ //
+ optional<bpkg::system_repository> system_repository;
+
+ private:
+ struct impl;
+
+ // Create/open main database.
+ //
+ database (const dir_path& cfg,
+ configuration* create,
+ odb::tracer&,
+ bool pre_attach,
+ bool sys_rep,
+ const dir_paths& pre_link,
+ std::string str_repr);
+
+ // Create attached database.
+ //
+ database (impl*,
+ const dir_path& cfg,
+ std::string schema,
+ bool sys_rep);
+
+ // If necessary, migrate this database and all the linked (both explicitly
+ // and implicitly) databases, recursively. Leave the linked databases
+ // attached. Must be called inside the transaction.
+ //
+ // Note that since the whole linked databases cluster is migrated at once,
+ // it is assumed that if migration is unnecessary for this database then
+ // it is also unnecessary for its linked databases. By this reason, we
+ // also drop the dangling implicit links rather than skip them, as we do
+ // for normal operations (see implicit_links () for details).
+ //
+ void
+ migrate ();
+
+ // Cache the configuration information.
+ //
+ void
+ cache_config (const uuid_type&,
+ optional<std::string> name,
+ std::string type);
+
+ // Note: must be called inside the transaction.
+ //
+ void
+ load_system_repository ();
+
+ // Add the configuration path to the BPKG_OPEN_CONFIGS environment
+ // variable which contains a list of the space-separated double-quoted
+ // absolute directory paths. Optionally, reset the list to this database's
+ // single path.
+ //
+ void
+ add_env (bool reset = false) const;
+
+ // Common implementation for the public overloads.
+ //
+ linked_databases
+ dependency_configs (optional<bool> buildtime, const std::string& type);
+
+ impl* impl_;
+
+ linked_configs explicit_links_;
+ linked_databases implicit_links_;
+ };
+
+ // NOTE: remember to update package_key and package_version_key comparison
+ // operators and compare_lazy_ptr if changing the database comparison
+ // operators.
+ //
+ // Note that here we use the database address as the database identity since
+ // we don't suppose two database instances for the same configuration to
+ // exist simultaneously due to the EXCLUSIVE locking mode (see database
+ // constructor for details).
+ //
+ inline bool
+ operator== (const database& x, const database& y)
+ {
+ return &x == &y;
+ }
+
+ inline bool
+ operator!= (const database& x, const database& y)
+ {
+ return !(x == y);
+ }
+
+ inline bool
+ operator< (const database& x, const database& y)
+ {
+ // Note that we used to compare the database addresses here (as for the
+ // equality operator) until we needed the database ordering to be
+ // consistent across runs (to support --rebuild-checksum, etc).
+ //
+ return x.config < y.config;
+ }
+
+ inline ostream&
+ operator<< (ostream& os, const database& db)
+ {
+ const string& s (db.string);
+
+ if (!s.empty ())
+ os << ' ' << s;
+
+ return os;
+ }
+
+ // Verify that a string is a valid configuration name, that is non-empty,
+ // containing only alpha-numeric characters, '_', '-' (except for the first
+ // character which can only be alphabetic or '_'). Issue diagnostics and
+ // fail if that's not the case.
+ //
+ void
+ validate_configuration_name (const string&, const char* what);
+
+ // The build-time dependency configuration types.
+ //
+ // Note that these are also used as private configuration names.
+ //
+ extern const string host_config_type;
+ extern const string build2_config_type;
+
+ // Return the configuration type suitable for building the specified
+ // build-time dependency: `build2` for build2 modules and `host` for others.
+ //
+ const string&
+ buildtime_dependency_type (const package_name&);
+
+ // Return the configuration type suitable for building a dependency of the
+ // dependent in the specified configuration: `build2` for build2 modules,
+ // `host` for other (regular) build-time dependencies, and the dependent
+ // configuration type for the runtime dependencies.
+ //
+ inline const string&
+ dependency_type (database& dependent_db,
+ const package_name& dependency_name,
+ bool buildtime)
+ {
+ return buildtime
+ ? buildtime_dependency_type (dependency_name)
+ : dependent_db.type;
+ }
// Transaction wrapper that allow the creation of dummy transactions (start
// is false) that in reality use an existing transaction.
//
- struct transaction
+ // Note that there can be multiple databases attached to the main database
+ // and normally a transaction object is passed around together with a
+ // specific database. Thus, we don't provide the database accessor function,
+ // so that the database is always chosen deliberately.
+ //
+ class transaction
{
+ public:
using database_type = bpkg::database;
explicit
transaction (database_type& db, bool start = true)
- : db_ (db), start_ (start), t_ () // Finalized.
+ : start_ (start), t_ () // Finalized.
{
if (start)
- t_.reset (db.begin ());
+ t_.reset (db.begin_exclusive ()); // See locking_mode for details.
}
void
commit ()
{
if (start_)
+ {
t_.commit ();
+ start_ = false;
+ }
}
void
rollback ()
{
if (start_)
+ {
t_.rollback ();
+ start_ = false;
+ }
}
- database_type&
- database ()
+ void
+ start (database_type& db)
{
- return db_;
+ assert (!start_);
+
+ start_ = true;
+ t_.reset (db.begin_exclusive ());
}
static bool
@@ -66,26 +638,16 @@ namespace bpkg
return odb::sqlite::transaction::has_current ();
}
- static odb::sqlite::transaction&
- current ()
- {
- return odb::sqlite::transaction::current ();
- }
-
private:
- database_type& db_;
bool start_;
odb::sqlite::transaction t_;
};
- database
- open (const dir_path& configuration, tracer&, bool create = false);
-
struct tracer_guard
{
tracer_guard (database& db, tracer& t)
: db_ (db), t_ (db.tracer ()) {db.tracer (t);}
- ~tracer_guard () {db_.tracer (*t_);}
+ ~tracer_guard () {db_.tracer (t_);}
private:
database& db_;
@@ -128,6 +690,83 @@ namespace bpkg
{
return pointer_result_range<R> (forward<R> (r));
}
+
+ // Note that lazy_shared_ptr and lazy_weak_ptr are defined in types.hxx.
+ //
+ template <typename T>
+ inline database& lazy_shared_ptr<T>::
+ database () const
+ {
+ return static_cast<bpkg::database&> (base_type::database ());
+ }
+
+ template <typename T>
+ inline database& lazy_weak_ptr<T>::
+ database () const
+ {
+ return static_cast<bpkg::database&> (base_type::database ());
+ }
+
+ // Map databases to values of arbitrary types.
+ //
+ // Note that keys are stored as non-constant references (since they are
+ // normally passed around as such), but they should never be changed
+ // directly.
+ //
+ template <typename V>
+ class database_map:
+ public small_vector<pair<reference_wrapper<database>, V>, 16>
+ {
+ public:
+ using value_type = pair<reference_wrapper<database>, V>;
+ using base_type = small_vector<value_type, 16>;
+ using iterator = typename base_type::iterator;
+ using const_iterator = typename base_type::const_iterator;
+
+ using base_type::begin;
+ using base_type::end;
+
+ iterator
+ find (database& db)
+ {
+ return find_if (begin (), end (),
+ [&db] (const value_type& i) -> bool
+ {
+ return i.first == db;
+ });
+ }
+
+ const_iterator
+ find (database& db) const
+ {
+ return find_if (begin (), end (),
+ [&db] (const value_type& i) -> bool
+ {
+ return i.first == db;
+ });
+ }
+
+ pair<iterator, bool>
+ insert (database& db, V&& v)
+ {
+ iterator i (find (db));
+ if (i != end ())
+ return make_pair (i, false);
+
+ return make_pair (base_type::emplace (end (), db, move (v)), true);
+ }
+
+ V&
+ operator[] (database& db)
+ {
+ iterator i (find (db));
+
+ if (i == end ())
+ i = base_type::emplace (end (), db, V ());
+
+ return i->second;
+ }
+ };
}
#endif // BPKG_DATABASE_HXX
diff --git a/bpkg/diagnostics.cxx b/bpkg/diagnostics.cxx
index a471d07..cf096d1 100644
--- a/bpkg/diagnostics.cxx
+++ b/bpkg/diagnostics.cxx
@@ -7,8 +7,8 @@
#include <odb/statement.hxx>
-#include <libbutl/process.mxx> // process_args
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_*)
+#include <libbutl/process.hxx> // process_args
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_*)
#include <bpkg/utility.hxx>
@@ -103,7 +103,7 @@ namespace bpkg
void trace_mark_base::
execute (odb::connection&, const char* stmt)
{
- if (verb >= 5)
+ if (verb >= 6)
static_cast<trace_mark&> (*this) << stmt;
}
@@ -129,7 +129,7 @@ namespace bpkg
const basic_mark error ("error");
const basic_mark warn ("warning");
const basic_mark info ("info");
- const basic_mark text (nullptr);
+ const basic_mark text (nullptr, nullptr, nullptr, nullptr); // No frame.
const fail_mark fail ("error");
const fail_end endf;
}
diff --git a/bpkg/diagnostics.hxx b/bpkg/diagnostics.hxx
index 925b080..a01d90c 100644
--- a/bpkg/diagnostics.hxx
+++ b/bpkg/diagnostics.hxx
@@ -8,7 +8,7 @@
#include <odb/tracer.hxx>
-#include <libbutl/diagnostics.mxx>
+#include <libbutl/diagnostics.hxx>
#include <bpkg/types.hxx> // Note: not <bpkg/utility.hxx>
@@ -16,15 +16,27 @@ namespace bpkg
{
using butl::diag_record;
- // Throw this exception to terminate the process. The handler should
- // assume that the diagnostics has already been issued.
+ // Throw this exception to terminate the process potentially with a custom
+ // exit code. The handler should assume that suitable diagnostics has
+ // already been issued.
//
- class failed: public std::exception {};
+ class failed: public std::exception
+ {
+ public:
+ uint16_t code;
+
+ explicit
+ failed (uint16_t c = 1): code (c) {}
+ };
// As above but needs to be used for recoverable errors which are likely to
// disappear on the command retry.
//
- class recoverable: public failed {};
+ class recoverable: public failed
+ {
+ public:
+ recoverable (): failed (2) {}
+ };
// Print process commmand line. If the number of elements is specified
// (or the second version is used), then it will print the piped multi-
@@ -97,13 +109,46 @@ namespace bpkg
template <typename F> inline void l5 (const F& f) {if (verb >= 5) f ();}
template <typename F> inline void l6 (const F& f) {if (verb >= 6) f ();}
+ // Progress reporting.
+ //
+ using butl::diag_progress;
+ using butl::diag_progress_lock;
+
// Diagnostic facility, base infrastructure.
//
using butl::diag_stream;
using butl::diag_epilogue;
+ using butl::diag_frame;
// Diagnostic facility, project specifics.
//
+
+ // Note: diag frames are not applied to text/trace diagnostics.
+ //
+ template <typename F>
+ struct diag_frame_impl: diag_frame
+ {
+ explicit
+ diag_frame_impl (F f): diag_frame (&thunk), func_ (move (f)) {}
+
+ private:
+ static void
+ thunk (const diag_frame& f, const butl::diag_record& r)
+ {
+ static_cast<const diag_frame_impl&> (f).func_ (
+ const_cast<diag_record&> (static_cast<const diag_record&> (r)));
+ }
+
+ const F func_;
+ };
+
+ template <typename F>
+ inline diag_frame_impl<F>
+ make_diag_frame (F f)
+ {
+ return diag_frame_impl<F> (move (f));
+ }
+
struct simple_prologue_base
{
explicit
@@ -167,7 +212,7 @@ namespace bpkg
basic_mark_base (const char* type,
const char* name = nullptr,
const void* data = nullptr,
- diag_epilogue* epilogue = nullptr)
+ diag_epilogue* epilogue = &diag_frame::apply)
: type_ (type), name_ (name), data_ (data), epilogue_ (epilogue) {}
simple_prologue
@@ -269,9 +314,10 @@ namespace bpkg
: basic_mark_base (type,
nullptr,
data,
- [](const diag_record& r)
+ [](const diag_record& r, butl::diag_writer* w)
{
- r.flush ();
+ diag_frame::apply (r);
+ r.flush (w);
throw failed ();
}) {}
};
diff --git a/bpkg/fetch-git.cxx b/bpkg/fetch-git.cxx
index 0c21af6..d2c30a1 100644
--- a/bpkg/fetch-git.cxx
+++ b/bpkg/fetch-git.cxx
@@ -5,12 +5,11 @@
#include <map>
-#include <libbutl/git.mxx>
-#include <libbutl/utility.mxx> // digit(), xdigit()
-#include <libbutl/filesystem.mxx> // path_entry
-#include <libbutl/path-pattern.mxx>
-#include <libbutl/semantic-version.mxx>
-#include <libbutl/standard-version.mxx> // parse_standard_version()
+#include <libbutl/git.hxx>
+#include <libbutl/filesystem.hxx> // path_entry
+#include <libbutl/path-pattern.hxx>
+#include <libbutl/semantic-version.hxx>
+#include <libbutl/standard-version.hxx> // parse_standard_version()
#include <bpkg/diagnostics.hxx>
@@ -286,20 +285,7 @@ namespace bpkg
try
{
- ifdstream is (move (pipe.in), fdstream_mode::skip, ifdstream::badbit);
-
- // We could probably write something like this, instead:
- //
- // *diag_stream << is.rdbuf () << flush;
- //
- // However, it would never throw and we could potentially miss the
- // reading failure, unless we decide to additionally mess with the
- // diagnostics stream exception mask.
- //
- for (string l; !eof (getline (is, l)); )
- *diag_stream << l << endl;
-
- is.close ();
+ dump_stderr (move (pipe.in));
// Fall through.
}
@@ -550,7 +536,11 @@ namespace bpkg
// For HTTP(S) sense the protocol type by sending the first HTTP request of
// the fetch operation handshake and analyzing the first line of the
// response. Fail if connecting to the server failed, the response code
- // differs from 200, or reading the response body failed.
+ // differs from 200 and 401, or reading the response body failed. If the
+ // response code is 401 (requires authentication), then consider protocol as
+ // smart. The thinking here is that a git repository with support for
+ // authentication is likely one of the hosting places (like git{hub,lab})
+ // and is unlikely to be dumb.
//
// Note that, as a side-effect, this function checks the HTTP(S) server
// availability and so must be called prior to any git command that involves
@@ -566,21 +556,16 @@ namespace bpkg
// URLs, if possible. That's why the function requires the git version
// parameter.
//
- enum class capabilities
- {
- dumb, // No shallow clone support.
- smart, // Support for shallow clone, but not for unadvertised refs fetch.
- unadv // Support for shallow clone and for unadvertised refs fetch.
- };
+ using capabilities = git_protocol_capabilities;
static capabilities
sense_capabilities (const common_options& co,
- repository_url url,
+ const repository_url& repo_url,
const semantic_version& git_ver)
{
- assert (url.path);
+ assert (repo_url.path);
- switch (url.scheme)
+ switch (repo_url.scheme)
{
case repository_protocol::git:
case repository_protocol::ssh:
@@ -589,6 +574,9 @@ namespace bpkg
case repository_protocol::https: break; // Ask the server (see below).
}
+ // Craft the URL for sensing the capabilities.
+ //
+ repository_url url (repo_url);
path& up (*url.path);
if (!up.to_directory ())
@@ -602,19 +590,94 @@ namespace bpkg
url.query = "service=git-upload-pack";
string u (url.string ());
- process pr (start_fetch (co,
- u,
- path () /* out */,
- "git/" + git_ver.string ()));
+
+ // Start fetching, also trying to retrieve the HTTP status code.
+ //
+ // We unset failbit to properly handle an empty response (no refs) from
+ // the dumb server.
+ //
+ ifdstream is (ifdstream::badbit);
+
+ pair<process, uint16_t> ps (
+ start_fetch_http (co,
+ u,
+ is /* out */,
+ fdstream_mode::skip | fdstream_mode::binary,
+ stderr_mode::redirect_quiet,
+ "git/" + git_ver.string ()));
+
+ process& pr (ps.first);
+
+ // If the fetch program stderr is redirected, then read it out and pass
+ // through.
+ //
+ auto dump_stderr = [&pr] ()
+ {
+ if (pr.in_efd != nullfd)
+ try
+ {
+ bpkg::dump_stderr (move (pr.in_efd));
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
+ };
try
{
- // We unset failbit to properly handle an empty response (no refs) from
- // the dumb server.
+ // If authentication is required (HTTP status code is 401), then
+ // consider the protocol as smart. Drop the diagnostics if that's the
+ // case and dump it otherwise.
//
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::skip | fdstream_mode::binary,
- ifdstream::badbit);
+ if (ps.second == 401)
+ {
+ if (verb >= 2)
+ {
+ info << "smart git protocol assumed for repository " << repo_url
+ << " due to authentication requirement" <<
+ info << "use --git-capabilities to override or suppress this "
+ << "diagnostics";
+ }
+
+ // Note that we don't care about the process exit code here and just
+ // silently wait for the process completion in the process object
+ // destructor. We, however, close the stream (reading out the
+ // content), so that the process won't get blocked writing to it.
+ //
+ // Also note that we drop the potentially redirected process stderr
+ // stream content. We even don't read it out, since we assume it fully
+ // fits into the pipe buffer.
+ //
+ is.close ();
+
+ return capabilities::smart;
+ }
+
+ // Fail on any other HTTP error (e.g., 404). In the case of a success
+ // code other than 200 (e.g. 204 (No Content)) just let the capabilities
+ // detection to take its course.
+ //
+ if (ps.second != 0 && (ps.second < 200 || ps.second >= 300))
+ {
+ // Note that we don't care about the process exit code here (see above
+ // for the reasoning).
+ //
+ is.close ();
+
+ // Dump the potentially redirected process stderr stream content since
+ // it may be helpful to the user.
+ //
+ // Note, however, that we don't know if it really contains the error
+ // description since the fetch program may even exit successfully (see
+ // start_fetch_http() for details). Thus, we additionally print the
+ // HTTP status code in the diagnostics.
+ //
+ dump_stderr ();
+
+ fail << "unable to fetch " << url <<
+ info << "HTTP status code " << ps.second << endg;
+ }
string l;
getline (is, l); // Is empty if no refs returned by the dumb server.
@@ -640,7 +703,7 @@ namespace bpkg
? capabilities::smart
: capabilities::dumb);
- // If the transport is smart let's see it the server also supports
+ // If the transport is smart let's see if the server also supports
// unadvertised refs fetch.
//
if (r == capabilities::smart && !is.eof ())
@@ -668,6 +731,8 @@ namespace bpkg
is.close ();
+ dump_stderr ();
+
if (pr.wait ())
return r;
@@ -675,6 +740,8 @@ namespace bpkg
}
catch (const io_error&)
{
+ dump_stderr ();
+
if (pr.wait ())
fail << "unable to read fetched " << url << endg;
@@ -874,28 +941,29 @@ namespace bpkg
if (i != repository_refs.end ())
return i->second;
- if (verb && !co.no_progress ())
+ if ((verb && !co.no_progress ()) || co.progress ())
text << "querying " << url;
refs rs;
- fdpipe pipe (open_pipe ());
-
- // Note: ls-remote doesn't print anything to stderr, so no progress
- // suppression is required.
- //
- process pr (start_git (co,
- pipe, 2 /* stderr */,
- timeout_opts (co, url.scheme),
- co.git_option (),
- "ls-remote",
- to_git_url (url)));
-
- // Shouldn't throw, unless something is severely damaged.
- //
- pipe.out.close ();
for (;;) // Breakout loop.
{
+ fdpipe pipe (open_pipe ());
+
+ // Note: ls-remote doesn't print anything to stderr, so no progress
+ // suppression is required.
+ //
+ process pr (start_git (co,
+ pipe, 2 /* stderr */,
+ timeout_opts (co, url.scheme),
+ co.git_option (),
+ "ls-remote",
+ to_git_url (url)));
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ pipe.out.close ();
+
try
{
ifdstream is (move (pipe.in), fdstream_mode::skip, ifdstream::badbit);
@@ -1084,7 +1152,25 @@ namespace bpkg
// the first call, and so git version get assigned (and checked).
//
if (!cap)
- cap = sense_capabilities (co, url (), git_ver);
+ {
+ const repository_url& u (url ());
+
+ // Check if the protocol capabilities are overridden for this
+ // repository.
+ //
+ const git_capabilities_map& gcs (co.git_capabilities ());
+
+ if (!gcs.empty () && u.scheme != repository_protocol::file)
+ {
+ auto i (gcs.find_sup (u.string ()));
+
+ if (i != gcs.end ())
+ cap = i->second;
+ }
+
+ if (!cap)
+ cap = sense_capabilities (co, u, git_ver);
+ }
return *cap;
};
@@ -1402,7 +1488,7 @@ namespace bpkg
catch (const logic_error&)
{
fail << "'" << s << "' doesn't appear to contain a git commit "
- "timestamp" << endg;
+ << "timestamp" << endg;
}
}
@@ -1505,7 +1591,7 @@ namespace bpkg
if (progress)
{
- if (verb == 1 && stderr_term)
+ if ((verb == 1 && stderr_term) || co.progress ())
v.push_back ("--progress");
}
else
@@ -1557,7 +1643,7 @@ namespace bpkg
// Print progress.
//
- if (verb && !co.no_progress ())
+ if ((verb && !co.no_progress ()) || co.progress ())
{
// Note that the clone command prints the following line prior to the
// progress lines:
@@ -1581,7 +1667,7 @@ namespace bpkg
dr << "from " << url ();
if (verb >= 2)
- dr << " in '" << dir.posix_string () << "'"; // Is used by tests.
+ dr << " in '" << dir.string () << "'"; // Used by tests.
}
// Print information messages prior to the deep fetching.
@@ -1702,6 +1788,255 @@ namespace bpkg
submodule_failure (d, prefix, e);
};
+ // Use git-config to obtain the submodules names/paths and then
+ // git-ls-files to obtain their commits.
+ //
+ // Note that previously we used git-submodule--helper-list subcommand to
+ // obtain the submodules commits/paths and then git-submodule--helper-name
+ // to obtain their names. However, git 2.38 has removed these subcommands.
+
+ // Obtain the submodules names/paths.
+ //
+ for (;;) // Breakout loop.
+ {
+ fdpipe pipe (open_pipe ());
+
+ process pr (start_git (co,
+ pipe, 2 /* stderr */,
+ co.git_option (),
+ "-C", dir,
+ "config",
+ "--list",
+ "--file", gitmodules_file,
+ "-z"));
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ pipe.out.close ();
+
+ try
+ {
+ ifdstream is (move (pipe.in), fdstream_mode::skip, ifdstream::badbit);
+
+ for (string l; !eof (getline (is, l, '\0')); )
+ {
+ auto bad = [&l] ()
+ {
+ throw runtime_error ("invalid submodule option '" + l + '\'');
+ };
+
+ // The submodule configuration option line is NULL-terminated and
+ // has the following form:
+ //
+ // submodule.<submodule-name>.<option-name><NEWLINE><value>
+ //
+ // For example:
+ //
+ // submodule.style.path
+ // doc/style
+ //
+ l4 ([&]{trace << "submodule option: " << l;});
+
+ // If this is a submodule path option, then extract its name and
+ // path and add the entry to the resulting list.
+ //
+ size_t n (l.find ('\n'));
+
+ if (n != string::npos &&
+ n >= 15 &&
+ l.compare (0, 10, "submodule.") == 0 &&
+ l.compare (n - 5, 5, ".path") == 0)
+ {
+ string nm (l, 10, n - 15);
+ dir_path p (l, n + 1, l.size () - n - 1);
+
+ // For good measure verify that the name and path are not empty.
+ //
+ if (nm.empty () || p.empty ())
+ bad ();
+
+ r.push_back (submodule {move (p), move (nm), empty_string});
+ }
+ }
+
+ is.close ();
+
+ if (pr.wait ())
+ break;
+
+ // Fall through.
+ }
+ catch (const invalid_path& e)
+ {
+ if (pr.wait ())
+ failure ("invalid submodule directory path '" + e.path + '\'');
+
+ // Fall through.
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ failure ("unable to read submodule options", &e);
+
+ // Fall through.
+ }
+ // Note that the io_error class inherits from the runtime_error class,
+ // so this catch-clause must go last.
+ //
+ catch (const runtime_error& e)
+ {
+ if (pr.wait ())
+ failure (e.what ());
+
+ // Fall through.
+ }
+
+ // We should only get here if the child exited with an error status.
+ //
+ assert (!pr.wait ());
+
+ failure ("unable to list submodule options");
+ }
+
+ // Note that we could potentially bail out here if the submodules list is
+ // empty. Let's however continue and verify that via git-ls-files, for
+ // good measure.
+
+ // Complete the resulting submodules information with their commits.
+ //
+ for (;;) // Breakout loop.
+ {
+ fdpipe pipe (open_pipe ());
+
+ process pr (start_git (co,
+ pipe, 2 /* stderr */,
+ co.git_option (),
+ "-C", dir,
+ "ls-files",
+ "--stage",
+ "-z"));
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ pipe.out.close ();
+
+ try
+ {
+ ifdstream is (move (pipe.in), fdstream_mode::skip, ifdstream::badbit);
+
+ for (string l; !eof (getline (is, l, '\0')); )
+ {
+ auto bad = [&l] ()
+ {
+ throw runtime_error ("invalid file description '" + l + '\'');
+ };
+
+ // The line describing a file is NULL-terminated and has the
+ // following form:
+ //
+ // <mode><SPACE><object><SPACE><stage><TAB><path>
+ //
+ // The mode is a 6-digit octal representation of the file type and
+ // permission bits mask. For a submodule directory it is 160000 (see
+ // git index format documentation for gitlink object type). For
+ // example:
+ //
+ // 160000 59dcc1bea3509e37b65905ac472f86f4c55eb510 0 doc/style
+ //
+ if (!(l.size () > 50 && l[48] == '0' && l[49] == '\t'))
+ bad ();
+
+ // For submodules permission bits are always zero, so we can match
+ // the mode as a string.
+ //
+ if (l.compare (0, 6, "160000") == 0)
+ {
+ l4 ([&]{trace << "submodule: " << l;});
+
+ dir_path d (l, 50, l.size () - 50);
+
+ auto i (find_if (r.begin (), r.end (),
+ [&d] (const submodule& sm) {return sm.path == d;}));
+
+ if (i == r.end ())
+ bad ();
+
+ i->commit = string (l, 7, 40);
+ }
+ }
+
+ is.close ();
+
+ if (pr.wait ())
+ break;
+
+ // Fall through.
+ }
+ catch (const invalid_path& e)
+ {
+ if (pr.wait ())
+ failure ("invalid submodule directory path '" + e.path + '\'');
+
+ // Fall through.
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ failure ("unable to read repository file list", &e);
+
+ // Fall through.
+ }
+ // Note that the io_error class inherits from the runtime_error class,
+ // so this catch-clause must go last.
+ //
+ catch (const runtime_error& e)
+ {
+ if (pr.wait ())
+ failure (e.what ());
+
+ // Fall through.
+ }
+
+ // We should only get here if the child exited with an error status.
+ //
+ assert (!pr.wait ());
+
+ failure ("unable to list repository files");
+ }
+
+ // Make sure that we have deduced commits for all the submodules.
+ //
+ for (const submodule& sm: r)
+ {
+ if (sm.commit.empty ())
+ failure ("unable to deduce commit for submodule " + sm.name);
+ }
+
+ return r;
+ }
+
+ // @@ TMP Old, submodule--helper-{list,name} subcommands-based,
+ // implementation of find_submodules().
+ //
+#if 0
+ static submodules
+ find_submodules (const common_options& co,
+ const dir_path& dir,
+ const dir_path& prefix,
+ bool gitmodules = true)
+ {
+ tracer trace ("find_submodules");
+
+ submodules r;
+
+ if (gitmodules && !exists (dir / gitmodules_file))
+ return r;
+
+ auto failure = [&prefix] (const string& d, const exception* e = nullptr)
+ {
+ submodule_failure (d, prefix, e);
+ };
+
fdpipe pipe (open_pipe ());
process pr (start_git (co,
@@ -1731,7 +2066,7 @@ namespace bpkg
l4 ([&]{trace << "submodule: " << l;});
if (!(l.size () > 50 && l[48] == '0' && l[49] == '\t'))
- throw runtime_error ("invalid submodule description '" + l + "'");
+ throw runtime_error ("invalid submodule description '" + l + '\'');
dir_path d (string (l, 50));
@@ -1765,7 +2100,7 @@ namespace bpkg
catch (const invalid_path& e)
{
if (pr.wait ())
- failure ("invalid submodule path '" + e.path + "'");
+ failure ("invalid submodule path '" + e.path + '\'');
// Fall through.
}
@@ -1793,6 +2128,7 @@ namespace bpkg
submodule_failure ("unable to list submodules", prefix);
}
+#endif
// Return commit id for the submodule directory or nullopt if the submodule
// is not initialized (directory doesn't exist, doesn't contain .git entry,
@@ -1840,13 +2176,15 @@ namespace bpkg
co.git_option (),
"-C", dir,
- // Note that older git versions don't recognize the --super-prefix
- // option but seem to behave correctly without any additional
- // efforts when it is omitted.
+ // Note that git versions outside the [2.14.0 2.38.0) range don't
+ // recognize the --super-prefix option but seem to behave correctly
+ // without any additional efforts when it is omitted.
//
- !prefix.empty () && git_ver >= semantic_version {2, 14, 0}
- ? strings ({"--super-prefix", prefix.posix_representation ()})
- : strings (),
+ (!prefix.empty () &&
+ git_ver >= semantic_version {2, 14, 0} &&
+ git_ver < semantic_version {2, 38, 0}
+ ? strings ({"--super-prefix", prefix.posix_representation ()})
+ : strings ()),
"submodule--helper", "init",
verb < 2 ? "-q" : nullptr))
@@ -1884,7 +2222,7 @@ namespace bpkg
if (u && *u == "none")
{
- if (verb >= 2 && !co.no_progress ())
+ if ((verb >= 2 && !co.no_progress ()) || co.progress ())
text << "skipping submodule '" << psd << "'";
// Note that the submodule can be enabled for some other snapshot we
@@ -1959,7 +2297,7 @@ namespace bpkg
catch (const invalid_path& e)
{
failure ("invalid submodule '" + sm.name + "' repository path '" +
- e.path + "'");
+ e.path + '\'');
}
catch (const invalid_argument& e)
{
@@ -1994,7 +2332,7 @@ namespace bpkg
// Let's make the message match the git-submodule script output (again,
// except for capitalization).
//
- if (verb && !co.no_progress ())
+ if ((verb && !co.no_progress ()) || co.progress ())
text << "submodule path '" << psd << "': checked out '" << sm.commit
<< "'";
@@ -2149,21 +2487,11 @@ namespace bpkg
dir_path () /* prefix */);
}
-#ifndef _WIN32
-
- // Noop on POSIX.
- //
- bool
- git_fixup_worktree (const common_options&, const dir_path&, bool)
- {
- return false;
- }
-
-#else
-
- // Find symlinks in the repository (non-recursive submodule-wise).
+ // Find symlinks in a working tree of a top repository or submodule
+ // (non-recursive submodule-wise) and return their relative paths together
+ // with the respective git object ids.
//
- static paths
+ static vector<pair<path, string>>
find_symlinks (const common_options& co,
const dir_path& dir,
const dir_path& prefix)
@@ -2194,12 +2522,12 @@ namespace bpkg
try
{
- paths r;
+ vector<pair<path, string>> r;
ifdstream is (move (pipe.in), fdstream_mode::skip, ifdstream::badbit);
for (string l; !eof (getline (is, l, '\0')); )
{
- // The line describing a file is NUL-terminated and has the following
+ // The line describing a file is NULL-terminated and has the following
// form:
//
// <mode><SPACE><object><SPACE><stage><TAB><path>
@@ -2209,16 +2537,18 @@ namespace bpkg
//
// 100644 165b42ec7a10fb6dd4a60b756fa1966c1065ef85 0 README
//
- l4 ([&]{trace << "file: " << l;});
-
if (!(l.size () > 50 && l[48] == '0' && l[49] == '\t'))
- throw runtime_error ("invalid file description '" + l + "'");
+ throw runtime_error ("invalid file description '" + l + '\'');
// For symlinks permission bits are always zero, so we can match the
// mode as a string.
//
if (l.compare (0, 6, "120000") == 0)
- r.push_back (path (string (l, 50)));
+ {
+ l4 ([&]{trace << "symlink: " << l;});
+
+ r.push_back (make_pair (path (string (l, 50)), string (l, 7, 40)));
+ }
}
is.close ();
@@ -2231,7 +2561,7 @@ namespace bpkg
catch (const invalid_path& e)
{
if (pr.wait ())
- failure ("invalid repository symlink path '" + e.path + "'");
+ failure ("invalid repository symlink path '" + e.path + '\'');
// Fall through.
}
@@ -2263,6 +2593,127 @@ namespace bpkg
submodule_failure ("unable to list repository files", prefix);
}
+ // Verify symlinks in a working tree of a top repository or submodule,
+ // recursively.
+ //
+ // Specifically, fail if the symlink target is not a valid relative path or
+ // refers outside the top repository directory.
+ //
+ static void
+ verify_symlinks (const common_options& co,
+ const dir_path& dir,
+ const dir_path& prefix)
+ {
+ auto failure = [&prefix] (const string& d, const exception* e = nullptr)
+ {
+ submodule_failure (d, prefix, e);
+ };
+
+ for (const auto& l: find_symlinks (co, dir, prefix))
+ {
+ const path& lp (l.first);
+
+ // Obtain the symlink target path.
+ //
+ path tp;
+
+ fdpipe pipe (open_pipe ());
+ process pr (start_git (co,
+ pipe, 2 /* stderr */,
+ co.git_option (),
+ "-C", dir,
+ "cat-file",
+ "-p",
+ l.second + "^{object}"));
+
+ // Shouldn't throw, unless something is severely damaged.
+ //
+ pipe.out.close ();
+
+ try
+ {
+ ifdstream is (move (pipe.in), fdstream_mode::skip);
+ string s (is.read_text ()); // Note: is not newline-terminated.
+ is.close ();
+
+ if (pr.wait () && !s.empty ())
+ try
+ {
+ tp = path (move (s));
+ }
+ catch (const invalid_path& e)
+ {
+ failure ("invalid target path '" + e.path + "' for symlink '" +
+ lp.string () + '\'',
+ &e);
+ }
+
+ // Fall through.
+ }
+ catch (const io_error&)
+ {
+ // Fall through.
+ }
+
+ if (tp.empty ())
+ failure ("unable to read target path for symlink '" + lp.string () +
+ "'");
+
+ // Verify that the symlink target path is relative.
+ //
+ if (tp.absolute ())
+ failure ("absolute target path '" + tp.string () + "' for symlink '" +
+ lp.string () + '\'');
+
+ // Verify that the symlink target path refers inside the top repository
+ // directory.
+ //
+ path rtp (prefix / lp.directory () / tp); // Relative to top directory.
+ rtp.normalize (); // Note: can't throw since the path is relative.
+
+ // Normalizing non-empty path can't end up with an empty path.
+ //
+ assert (!rtp.empty ());
+
+ // Make sure that the relative to the top repository directory target
+ // path doesn't start with '..'.
+ //
+ if (dir_path::traits_type::parent (*rtp.begin ()))
+ failure ("target path '" + tp.string () + "' for symlink '" +
+ lp.string () + "' refers outside repository");
+ }
+
+ // Verify symlinks for submodules.
+ //
+ for (const submodule& sm: find_submodules (co, dir, prefix))
+ verify_symlinks (co, dir / sm.path, prefix / sm.path);
+ }
+
+ void
+ git_verify_symlinks (const common_options& co, const dir_path& dir)
+ {
+ if ((verb && !co.no_progress ()) || co.progress ())
+ text << "verifying symlinks...";
+
+ verify_symlinks (co, dir, dir_path () /* prefix */);
+ }
+
+#ifndef _WIN32
+
+ // Noop on POSIX.
+ //
+ optional<bool>
+ git_fixup_worktree (const common_options&,
+ const dir_path&,
+ bool revert,
+ bool)
+ {
+ assert (!revert);
+ return false;
+ }
+
+#else
+
// Fix up or revert the previously made fixes in a working tree of a top
// repository or submodule (see git_fixup_worktree() description for
// details). Return nullopt if no changes are required (because real symlink
@@ -2304,7 +2755,7 @@ namespace bpkg
//
if (r)
failure ("unexpected real symlink in submodule '" +
- sm.path.string () + "'");
+ sm.path.string () + '\'');
return nullopt;
}
@@ -2324,13 +2775,35 @@ namespace bpkg
// skipping those with not-yet-existing target, unless no links were
// created at the previous run, in which case we fail.
//
- paths ls (find_symlinks (co, dir, prefix));
+ vector<pair<path, string>> ls (find_symlinks (co, dir, prefix));
vector<pair<path, path>> links; // List of the link/target path pairs.
+ // Mark the being replaced in the working tree links as unchanged,
+ // running git-update-index(1) for multiple links per run.
+ //
+ strings unchanged_links; // Links to mark as unchanged.
+
+ auto mark_unchanged = [&unchanged_links, &co, &dir, &failure] ()
+ {
+ if (!unchanged_links.empty ())
+ {
+ if (!run_git (co,
+ co.git_option (),
+ "-C", dir,
+ "update-index",
+ "--assume-unchanged",
+ unchanged_links))
+ failure ("unable to mark symlinks as unchanged");
+
+ unchanged_links.clear ();
+ }
+ };
+
// Cache/remove filesystem-agnostic symlinks.
//
- for (auto& l: ls)
+ for (auto& li: ls)
{
+ path& l (li.first);
path lp (dir / l); // Absolute or relative to the current directory.
// Check the symlink type to see if we need to replace it or can bail
@@ -2340,7 +2813,7 @@ namespace bpkg
// "elevated console mode":
//
// - file symlinks are currently not supported (see
- // libbutl/filesystem.mxx for details).
+ // libbutl/filesystem.hxx for details).
//
// - git creates symlinks to directories, rather than junctions. This
// makes things to fall apart as Windows API seems to be unable to
@@ -2356,14 +2829,14 @@ namespace bpkg
if (e.second.type == entry_type::symlink)
{
if (r)
- failure ("unexpected real symlink '" + l.string () + "'");
+ failure ("unexpected real symlink '" + l.string () + '\'');
return nullopt;
}
}
catch (const system_error& e)
{
- failure ("unable to stat symlink '" + l.string () + "'", &e);
+ failure ("unable to stat symlink '" + l.string () + '\'', &e);
}
// Read the symlink target path.
@@ -2378,7 +2851,7 @@ namespace bpkg
catch (const invalid_path& e)
{
failure ("invalid target path '" + e.path + "' for symlink '" +
- l.string () + "'",
+ l.string () + '\'',
&e);
}
catch (const io_error& e)
@@ -2390,14 +2863,14 @@ namespace bpkg
// Mark the symlink as unchanged and remove it.
//
- if (!run_git (co,
- co.git_option (),
- "-C", dir,
- "update-index",
- "--assume-unchanged",
- l))
- failure ("unable to mark symlink '" + l.string () +
- "' as unchanged");
+ // Note that we restrict the batch to 100 symlinks not to exceed the
+ // Windows command line max size, which is about 32K, and assuming
+ // that _MAX_PATH is 256 characters.
+ //
+ unchanged_links.push_back (l.string ());
+
+ if (unchanged_links.size () == 100)
+ mark_unchanged ();
links.emplace_back (move (l), move (t));
@@ -2405,6 +2878,8 @@ namespace bpkg
r = true;
}
+ mark_unchanged (); // Mark the rest.
+
// Create real links (hardlinks, symlinks, and junctions).
//
while (!links.empty ())
@@ -2440,7 +2915,7 @@ namespace bpkg
catch (const system_error& e)
{
failure ("unable to stat target '" + t.string () +
- "' for symlink '" + l.string () + "'",
+ "' for symlink '" + l.string () + '\'',
&e);
}
@@ -2458,7 +2933,7 @@ namespace bpkg
{
failure (string ("unable to create ") +
(dir_target ? "junction" : "hardlink") + " '" +
- l.string () + "' with target '" + t.string () + "'",
+ l.string () + "' with target '" + t.string () + '\'',
&e);
}
@@ -2486,8 +2961,10 @@ namespace bpkg
// filesystem entry. To prevent this, we remove all links ourselves
// first.
//
- for (const path& l: find_symlinks (co, dir, prefix))
+ for (const auto& li: find_symlinks (co, dir, prefix))
{
+ const path& l (li.first);
+
try
{
try_rmfile (dir / l);
@@ -2495,7 +2972,7 @@ namespace bpkg
catch (const system_error& e)
{
failure ("unable to remove hardlink, symlink, or junction '" +
- l.string () + "'",
+ l.string () + '\'',
&e);
}
}
@@ -2523,15 +3000,29 @@ namespace bpkg
return r;
}
- bool
+ optional<bool>
git_fixup_worktree (const common_options& co,
const dir_path& dir,
- bool revert)
+ bool revert,
+ bool ie)
{
- optional<bool> r (
- fixup_worktree (co, dir, revert, dir_path () /* prefix */));
+ if (!revert && ((verb && !co.no_progress ()) || co.progress ()))
+ text << "fixing up symlinks...";
+
+ try
+ {
+ optional<bool> r (
+ fixup_worktree (co, dir, revert, dir_path () /* prefix */));
- return r ? *r : false;
+ return r ? *r : false;
+ }
+ catch (const failed&)
+ {
+ if (ie)
+ return nullopt;
+
+ throw;
+ }
}
#endif
diff --git a/bpkg/fetch-pkg.cxx b/bpkg/fetch-pkg.cxx
index 81d4131..721e4b8 100644
--- a/bpkg/fetch-pkg.cxx
+++ b/bpkg/fetch-pkg.cxx
@@ -5,8 +5,8 @@
#include <sstream>
-#include <libbutl/filesystem.mxx> // cpfile ()
-#include <libbutl/manifest-parser.mxx>
+#include <libbutl/filesystem.hxx> // cpfile ()
+#include <libbutl/manifest-parser.hxx>
#include <bpkg/checksum.hxx>
#include <bpkg/diagnostics.hxx>
@@ -52,7 +52,7 @@ namespace bpkg
is.close ();
string s (bs.str ());
- string sha256sum (sha256 (s.c_str (), s.size ()));
+ string cs (sha256sum (s.c_str (), s.size ()));
istringstream ts (s); // Text mode.
@@ -60,7 +60,7 @@ namespace bpkg
M m (mp, ignore_unknown);
if (pr.wait ())
- return make_pair (move (m), move (sha256sum));
+ return make_pair (move (m), move (cs));
// Child existed with an error, fall through.
}
@@ -97,20 +97,80 @@ namespace bpkg
if (exists (df))
fail << "file " << df << " already exists";
+ // Currently we only expect fetching a package archive via the HTTP(S)
+ // protocol.
+ //
+ switch (u.scheme)
+ {
+ case repository_protocol::git:
+ case repository_protocol::ssh:
+ case repository_protocol::file: assert (false);
+ case repository_protocol::http:
+ case repository_protocol::https: break;
+ }
+
auto_rmfile arm (df);
- process pr (start_fetch (o,
- u.string (),
- df,
- string () /* user_agent */,
- o.pkg_proxy ()));
- if (!pr.wait ())
+ // Note that a package file may not be present in the repository due to
+ // outdated repository information. Thus, while fetching the file we also
+ // try to retrieve the HTTP status code. If the HTTP status code is
+ // retrieved and is 404 (not found) or the fetch program doesn't support
+ // its retrieval and fails, then we also advise the user to re-fetch the
+ // repositories.
+ //
+ pair<process, uint16_t> ps (
+ start_fetch_http (o,
+ u.string (),
+ df,
+ string () /* user_agent */,
+ o.pkg_proxy ()));
+
+ process& pr (ps.first);
+ uint16_t sc (ps.second);
+
+ // Fail if the fetch process didn't exit normally with 0 code or the HTTP
+ // status code is retrieved and differs from 200.
+ //
+ // Note that the diagnostics may potentially look as follows:
+ //
+ // foo-1.0.0.tar.gz:
+ // ###################################################### 100.0%
+ // error: unable to fetch package https://example.org/1/foo-1.0.0.tar.gz
+ // info: repository metadata could be stale
+ // info: run 'bpkg rep-fetch' (or equivalent) to update
+ //
+ // It's a bit unfortunate that the 100% progress indicator can be shown
+ // for a potential HTTP error and it doesn't seem that we can easily fix
+ // that. Note, however, that this situation is not very common and
+ // probably that's fine.
+ //
+ if (!pr.wait () || (sc != 0 && sc != 200))
{
// While it is reasonable to assuming the child process issued
// diagnostics, some may not mention the URL.
//
- fail << "unable to fetch " << u <<
- info << "re-run with -v for more information";
+ diag_record dr (fail);
+ dr << "unable to fetch package " << u;
+
+ // Print the HTTP status code in the diagnostics on the request failure,
+ // unless it cannot be retrieved or is 404. Note that the fetch program
+ // may even exit successfully on such a failure (see start_fetch_http()
+ // for details) and issue no diagnostics at all.
+ //
+ if (sc != 0 && sc != 200 && sc != 404)
+ dr << info << "HTTP status code " << sc;
+
+ // If not found, advise the user to re-fetch the repositories. Note that
+ // if the status code cannot be retrieved, we assume it could be 404 and
+ // advise.
+ //
+ if (sc == 404 || sc == 0)
+ {
+ dr << info << "repository metadata could be stale" <<
+ info << "run 'bpkg rep-fetch' (or equivalent) to update";
+ }
+ else if (verb < 2)
+ dr << info << "re-run with -v for more information";
}
arm.cancel ();
@@ -146,14 +206,14 @@ namespace bpkg
// and reading the manifest. The file should be opened in the binary
// mode for the first operation and in the text mode for the second one.
//
- string sha256sum;
+ string cs;
if (o != nullptr)
- sha256sum = sha256 (*o, f); // Read file in the binary mode.
+ cs = sha256sum (*o, f); // Read file in the binary mode.
ifdstream ifs (f); // Open file in the text mode.
manifest_parser mp (ifs, f.string ());
- return make_pair (M (mp, ignore_unknown), move (sha256sum));
+ return make_pair (M (mp, ignore_unknown), move (cs));
}
catch (const manifest_parsing& e)
{
@@ -168,8 +228,14 @@ namespace bpkg
pkg_repository_manifests
pkg_fetch_repositories (const dir_path& d, bool iu)
{
- return fetch_manifest<pkg_repository_manifests> (
- nullptr, d / repositories_file, iu).first;
+ pkg_repository_manifests r (
+ fetch_manifest<pkg_repository_manifests> (
+ nullptr, d / repositories_file, iu).first);
+
+ if (r.empty ())
+ r.emplace_back (repository_manifest ()); // Add the base repository.
+
+ return r;
}
pair<pkg_repository_manifests, string/*checksum*/>
@@ -184,9 +250,15 @@ namespace bpkg
path& f (*u.path);
f /= repositories_file;
- return rl.remote ()
+ pair<pkg_repository_manifests, string> r (
+ rl.remote ()
? fetch_manifest<pkg_repository_manifests> (o, u, iu)
- : fetch_manifest<pkg_repository_manifests> (&o, f, iu);
+ : fetch_manifest<pkg_repository_manifests> (&o, f, iu));
+
+ if (r.first.empty ())
+ r.first.emplace_back (repository_manifest ()); // Add the base repository.
+
+ return r;
}
pkg_package_manifests
diff --git a/bpkg/fetch.cxx b/bpkg/fetch.cxx
index 9db4920..5b59d42 100644
--- a/bpkg/fetch.cxx
+++ b/bpkg/fetch.cxx
@@ -3,9 +3,12 @@
#include <bpkg/fetch.hxx>
+#include <libbutl/curl.hxx>
+
#include <bpkg/diagnostics.hxx>
using namespace std;
+using namespace butl;
namespace bpkg
{
@@ -84,12 +87,21 @@ namespace bpkg
}
}
- static process
+ // Note that there is no easy way to retrieve the HTTP status code for wget
+ // (there is no reliable way to redirect the status line/headers to stdout)
+ // and thus we always return 0. Due to the status code unavailability there
+ // is no need to redirect stderr and thus we ignore the stderr mode.
+ //
+ static pair<process, uint16_t>
start_wget (const path& prog,
const optional<size_t>& timeout,
+ bool progress,
bool no_progress,
+ stderr_mode,
const strings& ops,
const string& url,
+ ifdstream* out_is,
+ fdstream_mode out_ism,
const path& out,
const string& user_agent,
const string& http_proxy)
@@ -98,7 +110,7 @@ namespace bpkg
const string& ua (user_agent.empty ()
? BPKG_USER_AGENT " wget/" + to_string (wget_major) +
- "." + to_string (wget_minor)
+ '.' + to_string (wget_minor)
: user_agent);
cstrings args {
@@ -106,12 +118,21 @@ namespace bpkg
"-U", ua.c_str ()
};
+ // Wget 1.16 introduced the --show-progress option which in the quiet mode
+ // (-q) shows a nice and tidy progress bar (if only it also showed errors,
+ // then it would have been perfect).
+ //
+ bool has_show_progress (wget_major > 1 ||
+ (wget_major == 1 && wget_minor >= 16));
+
// Map verbosity level. If we are running quiet or at level 1
// and the output is stdout, then run wget quiet. If at level
// 1 and the output is a file, then show the progress bar. At
// level 2 and 3 run it at the default level (so we will print
// the command line and it will display the progress, error
// messages, etc). Higher than that -- run it with debug output.
+ // Always show the progress bar if requested explicitly, even in
+ // the quiet mode.
//
// In the wget world quiet means don't print anything, not even
// error messages. There is also the -nv mode (aka "non-verbose")
@@ -122,16 +143,29 @@ namespace bpkg
//
if (verb < (fo ? 1 : 2))
{
- args.push_back ("-q");
- no_progress = false; // Already suppressed with -q.
+ bool quiet (true);
+
+ if (progress)
+ {
+        // If the --show-progress option is supported, then pass both
+ // --show-progress and -q, otherwise pass none of them and run
+ // verbose.
+ //
+ if (has_show_progress)
+ args.push_back ("--show-progress");
+ else
+ quiet = false;
+ }
+
+ if (quiet)
+ {
+ args.push_back ("-q");
+ no_progress = false; // Already suppressed with -q.
+ }
}
else if (fo && verb == 1)
{
- // Wget 1.16 introduced the --show-progress option which in the
- // quiet mode shows a nice and tidy progress bar (if only it also
- // showed errors, then it would have been perfect).
- //
- if (wget_major > 1 || (wget_major == 1 && wget_minor >= 16))
+ if (has_show_progress)
{
args.push_back ("-q");
@@ -199,12 +233,19 @@ namespace bpkg
// just the file name (rather than the whole path) in the progress
// report. Process exceptions must be handled by the caller.
//
- return fo
- ? process (pp, args.data (),
- 0, 1, 2,
- out.directory ().string ().c_str (),
- env.vars)
- : process (pp, args.data (), 0, -1, 2, nullptr /* cwd */, env.vars);
+ process pr (fo
+ ? process (pp, args.data (),
+ 0, 1, 2,
+ out.directory ().string ().c_str (),
+ env.vars)
+ : process (pp, args.data (),
+ 0, -1, 2,
+ nullptr /* cwd */, env.vars));
+
+ if (!fo && out_is != nullptr)
+ out_is->open (move (pr.in_ofd), out_ism);
+
+ return make_pair (move (pr), 0);
}
// curl
@@ -252,12 +293,23 @@ namespace bpkg
return false;
}
- static process
+ // If HTTP status code needs to be retrieved (out_is != NULL), then open the
+ // passed stream and read out the status line(s) extracting the status code
+ // and the headers. Otherwise, return 0 indicating that the status code is
+ // not available. In the former case if the output file is also specified,
+ // then read out and save the file if the status code is 200 and drop the
+ // HTTP response body otherwise.
+ //
+ static pair<process, uint16_t>
start_curl (const path& prog,
const optional<size_t>& timeout,
+ bool progress,
bool no_progress,
+ stderr_mode err_mode,
const strings& ops,
const string& url,
+ ifdstream* out_is,
+ fdstream_mode out_ism,
const path& out,
const string& user_agent,
const string& http_proxy)
@@ -270,7 +322,6 @@ namespace bpkg
cstrings args {
prog.string ().c_str (),
- "-f", // Fail on HTTP errors (e.g., 404).
"-L", // Follow redirects.
"-A", ua.c_str ()
};
@@ -286,27 +337,36 @@ namespace bpkg
// 1 and the output is a file, then show the progress bar. At
// level 2 and 3 run it at the default level (so we will print
// the command line and it will display its elaborate progress).
- // Higher than that -- run it verbose.
+ // Higher than that -- run it verbose. Always show the progress
+ // bar if requested explicitly, even in the quiet mode.
//
- if (verb < (fo ? 1 : 2))
- {
- suppress_progress ();
- no_progress = false; // Already suppressed.
- }
- else if (fo && verb == 1)
+ bool quiet (err_mode == stderr_mode::redirect_quiet);
+
+ if (!quiet)
{
- if (!no_progress)
- args.push_back ("--progress-bar");
+ if (verb < (fo ? 1 : 2))
+ {
+ if (!progress)
+ {
+ suppress_progress ();
+ no_progress = false; // Already suppressed.
+ }
+ }
+ else if (fo && verb == 1)
+ {
+ if (!no_progress)
+ args.push_back ("--progress-bar");
+ }
+ else if (verb > 3)
+ args.push_back ("-v");
}
- else if (verb > 3)
- args.push_back ("-v");
// Suppress progress.
//
// Note: the `-v -s` options combination is valid and results in a verbose
// output without progress.
//
- if (no_progress)
+ if (no_progress || quiet)
suppress_progress ();
// Set download timeout if requested.
@@ -327,7 +387,7 @@ namespace bpkg
// Output. By default curl writes to stdout.
//
- if (fo)
+ if (fo && out_is == nullptr) // Output to file and don't query HTTP status?
{
args.push_back ("-o");
args.push_back (out.string ().c_str ());
@@ -341,11 +401,31 @@ namespace bpkg
args.push_back (http_proxy.c_str ());
}
+ // Status code.
+ //
+ // Add the --include|-i option if HTTP status code needs to be retrieved
+ // in order to include the HTTP response headers to the output. Otherwise,
+ // add the --fail|-f option not to print the response body and exit with
+ // non-zero status code on HTTP error (e.g., 404), so that the caller can
+ // recognize the request failure.
+ //
+ // Note that older versions of curl (e.g., 7.55.1) ignore the --include|-i
+ // option in the presence of the --fail|-f option on HTTP errors and don't
+ // print the response status line and headers.
+ //
+ if (out_is != nullptr)
+ args.push_back ("-i");
+ else
+ args.push_back ("-f");
+
args.push_back (url.c_str ());
args.push_back (nullptr);
process_path pp (process::path_search (args[0]));
+ // Let's still print the command line in the quiet mode to ease the
+ // troubleshooting.
+ //
if (verb >= 2)
print_process (args);
else if (verb == 1 && fo && !no_progress)
@@ -358,9 +438,115 @@ namespace bpkg
// Process exceptions must be handled by the caller.
//
- return fo
- ? process (pp, args.data ())
- : process (pp, args.data (), 0, -1);
+ process pr (fo && out_is == nullptr
+ ? process (pp, args.data ())
+ : process (pp, args.data (),
+ 0, -1, err_mode == stderr_mode::pass ? 2 : -1));
+
+ // Close the process stdout stream and read stderr stream out and dump.
+ //
+ // Needs to be called prior to failing, so that the process won't get
+  // blocked writing to stdout and so that stderr gets dumped before the
+ // error message we issue.
+ //
+ auto close_streams = [&pr, out_is, err_mode] ()
+ {
+ try
+ {
+ assert (out_is != nullptr);
+
+ out_is->close ();
+
+ if (err_mode != stderr_mode::pass)
+ bpkg::dump_stderr (move (pr.in_efd));
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
+ };
+
+ // If HTTP status code needs to be retrieved, then open the passed stream
+ // and read out the status line(s) and headers.
+ //
+ // Note that this implementation is inspired by the bdep's
+ // http_service::post() function.
+ //
+ uint16_t sc (0);
+
+ if (out_is != nullptr)
+ try
+ {
+ ifdstream& is (*out_is);
+ is.open (move (pr.in_ofd), out_ism);
+ sc = curl::read_http_status (*out_is).code;
+ }
+ catch (const invalid_argument& e)
+ {
+ close_streams ();
+
+ fail << "unable to read HTTP response status line for " << url << ": "
+ << e;
+ }
+ catch (const io_error&)
+ {
+ close_streams ();
+
+ fail << "unable to read HTTP response status line for " << url;
+ }
+
+ // If the output file is specified and the HTTP status code needs to also
+ // be retrieved, then read out and save the file if the status code is 200
+ // and drop the HTTP response body otherwise.
+ //
+ bool io_read; // If true then io_error relates to a read operation.
+ if (fo && out_is != nullptr)
+ try
+ {
+ ifdstream& is (*out_is);
+
+ // Read and save the file if the HTTP status code is 200.
+ //
+ if (sc == 200)
+ {
+ io_read = false;
+ ofdstream os (out, fdopen_mode::binary);
+
+ bufstreambuf* buf (dynamic_cast<bufstreambuf*> (is.rdbuf ()));
+ assert (buf != nullptr);
+
+ for (io_read = true;
+ is.peek () != istream::traits_type::eof (); // Potentially reads.
+ io_read = true)
+ {
+ size_t n (buf->egptr () - buf->gptr ());
+
+ io_read = false;
+ os.write (buf->gptr (), n);
+
+ buf->gbump (static_cast<int> (n));
+ }
+
+ io_read = false;
+ os.close ();
+ }
+
+ // Close the stream, skipping the remaining content, if present.
+ //
+ io_read = true;
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ close_streams ();
+
+ if (io_read)
+ fail << "unable to read fetched " << url << ": " << e;
+ else
+ fail << "unable to write to " << out << ": " << e;
+ }
+
+ return make_pair (move (pr), sc);
}
// fetch
@@ -410,12 +596,24 @@ namespace bpkg
return false;
}
- static process
+ // Note that there is no easy way to retrieve the HTTP status code for the
+ // fetch program and thus we always return 0.
+ //
+ // Also note that in the redirect* stderr modes we nevertheless redirect
+ // stderr to prevent the fetch program from interactively querying the user
+ // for the credentials. Thus, we also respect the redirect_quiet mode in
+ // contrast to start_wget().
+ //
+ static pair<process, uint16_t>
start_fetch (const path& prog,
const optional<size_t>& timeout,
+ bool progress,
bool no_progress,
+ stderr_mode err_mode,
const strings& ops,
const string& url,
+ ifdstream* out_is,
+ fdstream_mode out_ism,
const path& out,
const string& user_agent,
const string& http_proxy)
@@ -437,7 +635,8 @@ namespace bpkg
// Map verbosity level. If we are running quiet then run fetch quiet.
// If we are at level 1 and we are fetching into a file or we are at
// level 2 or 3, then run it at the default level (so it will display
- // the progress). Higher than that -- run it verbose.
+ // the progress). Higher than that -- run it verbose. Always show the
+ // progress bar if requested explicitly, even in the quiet mode.
//
// Note that the only way to suppress progress for the fetch program is to
// run it quiet (-q). However, it prints nothing but the progress by
@@ -446,20 +645,28 @@ namespace bpkg
// unless the verbosity level is greater than three, in which case we will
// run verbose (and with progress). That's the best we can do.
//
- if (verb < (fo ? 1 : 2))
- {
- args.push_back ("-q");
- no_progress = false; // Already suppressed with -q.
- }
- else if (verb > 3)
+ bool quiet (err_mode == stderr_mode::redirect_quiet);
+
+ if (!quiet)
{
- args.push_back ("-v");
- no_progress = false; // Don't be quiet in the verbose mode (see above).
+ if (verb < (fo ? 1 : 2))
+ {
+ if (!progress)
+ {
+ args.push_back ("-q");
+ no_progress = false; // Already suppressed with -q.
+ }
+ }
+ else if (verb > 3)
+ {
+ args.push_back ("-v");
+ no_progress = false; // Don't be quiet in the verbose mode (see above).
+ }
}
// Suppress progress.
//
- if (no_progress)
+ if (no_progress || quiet)
args.push_back ("-q");
// Set download timeout if requested.
@@ -501,6 +708,9 @@ namespace bpkg
env.vars = evars;
}
+ // Let's still print the command line in the quiet mode to ease the
+ // troubleshooting.
+ //
if (verb >= 2)
print_process (env, args);
@@ -509,12 +719,19 @@ namespace bpkg
// just the file name (rather than the whole path) in the progress
// report. Process exceptions must be handled by the caller.
//
- return fo
- ? process (pp, args.data (),
- 0, 1, 2,
- out.directory ().string ().c_str (),
- env.vars)
- : process (pp, args.data (), 0, -1, 2, nullptr /* cwd */, env.vars);
+ process pr (fo
+ ? process (pp, args.data (),
+ 0, 1, 2,
+ out.directory ().string ().c_str (),
+ env.vars)
+ : process (pp, args.data (),
+ 0, -1, err_mode == stderr_mode::pass ? 2 : -1,
+ nullptr /* cwd */, env.vars));
+
+ if (!fo && out_is != nullptr)
+ out_is->open (move (pr.in_ofd), out_ism);
+
+ return make_pair (move (pr), 0);
}
// The dispatcher.
@@ -522,7 +739,7 @@ namespace bpkg
// Cache the result of finding/testing the fetch program. Sometimes a simple
// global variable is really the right solution...
//
- enum class fetch_kind {wget, curl, fetch};
+ enum class fetch_kind {curl, wget, fetch};
static path path_;
static fetch_kind kind_;
@@ -542,20 +759,20 @@ namespace bpkg
const path& n (p.leaf ());
const string& s (n.string ());
- if (s.find ("wget") != string::npos)
- {
- if (!check_wget (p))
- fail << p << " does not appear to be the 'wget' program";
-
- kind_ = fetch_kind::wget;
- }
- else if (s.find ("curl") != string::npos)
+ if (s.find ("curl") != string::npos)
{
if (!check_curl (p))
fail << p << " does not appear to be the 'curl' program";
kind_ = fetch_kind::curl;
}
+ else if (s.find ("wget") != string::npos)
+ {
+ if (!check_wget (p))
+ fail << p << " does not appear to be the 'wget' program";
+
+ kind_ = fetch_kind::wget;
+ }
else if (s.find ("fetch") != string::npos)
{
if (!check_fetch (p))
@@ -566,15 +783,45 @@ namespace bpkg
else
fail << "unknown fetch program " << p;
}
+ else if (o.curl_specified ())
+ {
+ const path& p (path_ = o.curl ());
+
+ if (!check_curl (p))
+ fail << p << " does not appear to be the 'curl' program";
+
+ kind_ = fetch_kind::curl;
+ }
else
{
// See if any is available. The preference order is:
//
+ // curl
+ // wget
+ // fetch
+#if 1
+ if (check_curl (path_ = path ("curl")))
+ {
+ kind_ = fetch_kind::curl;
+ }
+ else if (check_wget (path_ = path ("wget")))
+ {
+ kind_ = fetch_kind::wget;
+ }
+#else
+ // Old preference order:
+ //
// wget 1.16 or up
// curl
// wget
// fetch
//
+ // We used to prefer wget 1.16 because it has --show-progress which
+ // results in nicer progress. But experience shows that wget is quite
+ // unreliable plus with bdep always using curl, it would be strange
+ // to use both curl and wget (and expecting the user to setup proxy,
+ // authentication, etc., for both).
+ //
bool wg (check_wget (path_ = path ("wget")));
if (wg && (wget_major > 1 || (wget_major == 1 && wget_minor >= 16)))
@@ -590,12 +837,13 @@ namespace bpkg
path_ = path ("wget");
kind_ = fetch_kind::wget;
}
+#endif
else if (check_fetch (path_ = path ("fetch")))
{
kind_ = fetch_kind::fetch;
}
else
- fail << "unable to find 'wget', 'curl', or 'fetch'" <<
+ fail << "unable to find 'curl', 'wget', or 'fetch'" <<
info << "use --fetch to specify the fetch program location";
if (verb >= 3)
@@ -606,26 +854,47 @@ namespace bpkg
return kind_;
}
- process
+ static pair<process, uint16_t>
start_fetch (const common_options& o,
const string& src,
+ ifdstream* out_is,
+ fdstream_mode out_ism,
+ stderr_mode err_mode,
const path& out,
const string& user_agent,
const url& proxy)
{
- process (*f) (const path&,
- const optional<size_t>&,
- bool,
- const strings&,
- const string&,
- const path&,
- const string&,
- const string&) = nullptr;
-
- switch (check (o))
+ // Currently, for the sake of simplicity, we don't support redirecting
+ // stderr if we fetch into a file.
+ //
+ assert (out.empty () || err_mode == stderr_mode::pass);
+
+ // If out_is is not NULL and out is not empty, then the former argument is
+ // unused by the caller and only indicates that the HTTP status code still
+ // needs to be retrieved while the requested file needs to be saved. In
+ // this case if the fetch program doesn't provide an easy way to retrieve
+ // the HTTP status code, then the respective start_*() function can just
+ // ignore the referred stream. Otherwise, it may or may not use it for
+ // convenience but should close it before returning if it does.
+ //
+ pair<process, uint16_t> (*f) (const path&,
+ const optional<size_t>&,
+ bool,
+ bool,
+ stderr_mode,
+ const strings&,
+ const string&,
+ ifdstream*,
+ fdstream_mode,
+ const path&,
+ const string&,
+ const string&) = nullptr;
+
+ fetch_kind fk (check (o));
+ switch (fk)
{
- case fetch_kind::wget: f = &start_wget; break;
case fetch_kind::curl: f = &start_curl; break;
+ case fetch_kind::wget: f = &start_wget; break;
case fetch_kind::fetch: f = &start_fetch; break;
}
@@ -698,11 +967,40 @@ namespace bpkg
}
}
+ // Note that the merge semantics here is not 100% accurate since we may
+ // override "later" --fetch-option with "earlier" --curl-option.
+ // However, this should be close enough for our use-case, which is
+ // bdep's --curl-option values overriding --fetch-option specified in
+ // the default options file. The situation that we will mis-handle is
+ // when both are specified on the command line, for example,
+ // --curl-option --max-time=2 --bpkg-option --fetch-option=--max-time=1,
+      // but that feels quite far-fetched to complicate things here.
+ //
+ const strings& fos (o.fetch_option ());
+ const strings& cos (o.curl_option ());
+
+ const strings& os (
+ fk != fetch_kind::curl || cos.empty ()
+ ? fos
+ : (fos.empty ()
+ ? cos
+ : [&fos, &cos] ()
+ {
+ strings r (fos.begin (), fos.end ());
+ r.insert (r.end (), cos.begin (), cos.end ());
+ return r;
+ } ()));
+
+
return f (path_,
timeout,
+ o.progress (),
o.no_progress (),
- o.fetch_option (),
+ err_mode,
+ os,
!http_url.empty () ? http_url : src,
+ out_is,
+ out_ism,
out,
user_agent,
http_proxy);
@@ -717,4 +1015,61 @@ namespace bpkg
throw failed ();
}
}
+
+ process
+ start_fetch (const common_options& o,
+ const string& src,
+ const path& out,
+ const string& user_agent,
+ const url& proxy)
+ {
+ return start_fetch (o,
+ src,
+ nullptr /* out_is */,
+ fdstream_mode::none,
+ stderr_mode::pass,
+ out,
+ user_agent,
+ proxy).first;
+ }
+
+ pair<process, uint16_t>
+ start_fetch_http (const common_options& o,
+ const string& src,
+ ifdstream& out,
+ fdstream_mode out_mode,
+ stderr_mode err_mode,
+ const string& user_agent,
+ const url& proxy)
+ {
+ return start_fetch (o,
+ src,
+ &out,
+ out_mode,
+ err_mode,
+ path () /* out */,
+ user_agent,
+ proxy);
+ }
+
+ pair<process, uint16_t>
+ start_fetch_http (const common_options& o,
+ const string& src,
+ const path& out,
+ const string& user_agent,
+ const url& proxy)
+ {
+ assert (!out.empty ());
+
+ ifdstream is (ifdstream::badbit | ifdstream::failbit);
+
+ return start_fetch (o,
+ src,
+ &is,
+ fdstream_mode::skip | fdstream_mode::binary,
+ stderr_mode::pass,
+ out,
+ user_agent,
+ proxy);
+ }
}
diff --git a/bpkg/fetch.hxx b/bpkg/fetch.hxx
index d57dcf3..daf1ffe 100644
--- a/bpkg/fetch.hxx
+++ b/bpkg/fetch.hxx
@@ -102,18 +102,29 @@ namespace bpkg
const repository_location&,
const dir_path&);
+
+ // Verify that the symlinks target paths in the working tree are valid,
+ // relative, and none of them refer outside the repository directory.
+ //
+ void
+ git_verify_symlinks (const common_options&, const dir_path&);
+
// Fix up or revert the fixes (including in submodules, recursively) in a
// working tree previously checked out by git_checkout() or
// git_checkout_submodules(). Return true if any changes have been made to
- // the filesystem.
+ // the filesystem. On error issue diagnostics and return nullopt in the
+ // ignore errors mode and throw failed otherwise.
//
// Noop on POSIX. On Windows it may replace git's filesystem-agnostic
// symlinks with hardlinks for the file targets and junctions for the
// directory targets. Note that it still makes sure the working tree is
// being treated by git as "clean" despite the changes.
//
- bool
- git_fixup_worktree (const common_options&, const dir_path&, bool revert);
+ optional<bool>
+ git_fixup_worktree (const common_options&,
+ const dir_path&,
+ bool revert,
+ bool ignore_errors = false);
// Low-level fetch API (fetch.cxx).
//
@@ -127,11 +138,65 @@ namespace bpkg
// option for details).
//
process
- start_fetch (const common_options& o,
+ start_fetch (const common_options&,
const string& url,
const path& out = {},
const string& user_agent = {},
const butl::url& proxy = {});
+
+ // Similar to the above but can only be used for fetching HTTP(S) URL to a
+ // file. Additionally return the HTTP status code, if the underlying fetch
+ // program provides an easy way to retrieve it, and 0 otherwise.
+ //
+ pair<process, uint16_t>
+ start_fetch_http (const common_options&,
+ const string& url,
+ const path& out,
+ const string& user_agent = {},
+ const butl::url& proxy = {});
+
+ // As above but fetches HTTP(S) URL to stdout, which can be read by the
+ // caller from the specified stream. On HTTP errors (e.g., 404) this stream
+ // may contain the error description returned by the server and the process
+ // may exit with 0 code.
+ //
+ // Fetch process stderr redirect mode.
+ //
+ enum class stderr_mode
+ {
+ // Don't redirect stderr.
+ //
+ pass,
+
+ // If the underlying fetch program provides an easy way to retrieve the
+ // HTTP status code, then redirect the fetch process stderr to a pipe, so
+ // that depending on the returned status code the caller can either drop
+ // or dump the fetch process diagnostics. Otherwise, may still redirect
+ // stderr for some implementation-specific reasons (to prevent the
+ // underlying fetch program from interacting with the user, etc). The
+ // caller can detect whether stderr is redirected or not by checking
+ // process::in_efd.
+ //
+ redirect,
+
+ // As above but if stderr is redirected, minimize the amount of
+ // diagnostics printed by the fetch program by only printing errors. That
+ // allows the caller to read stdout and stderr streams sequentially in the
+ // blocking mode by assuming that the diagnostics always fits into the
+ // pipe buffer. If stderr is not redirected, then ignore this mode in
+ // favor of the more informative diagnostics.
+ //
+ redirect_quiet
+ };
+
+ pair<process, uint16_t>
+ start_fetch_http (const common_options&,
+ const string& url,
+ ifdstream& out,
+ fdstream_mode out_mode,
+ stderr_mode,
+ const string& user_agent = {},
+ const butl::url& proxy = {});
}
#endif // BPKG_FETCH_HXX
diff --git a/bpkg/forward.hxx b/bpkg/forward.hxx
index becf628..ebf8cbd 100644
--- a/bpkg/forward.hxx
+++ b/bpkg/forward.hxx
@@ -4,15 +4,17 @@
#ifndef BPKG_FORWARD_HXX
#define BPKG_FORWARD_HXX
-#include <odb/sqlite/forward.hxx>
-
namespace bpkg
{
- using odb::sqlite::database;
- struct transaction;
+ // <bpkg/database.hxx>
+ //
+ class database;
+ class linked_databases;
+ class transaction;
// <bpkg/package.hxx>
//
+ class configuration;
class repository;
class repository_fragment;
class selected_package;
diff --git a/bpkg/help.cxx b/bpkg/help.cxx
index 20b3805..ed0f58a 100644
--- a/bpkg/help.cxx
+++ b/bpkg/help.cxx
@@ -3,7 +3,7 @@
#include <bpkg/help.hxx>
-#include <libbutl/pager.mxx>
+#include <libbutl/pager.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/bpkg-options.hxx>
diff --git a/bpkg/manifest-utility.cxx b/bpkg/manifest-utility.cxx
index 0332f4d..afcb1f7 100644
--- a/bpkg/manifest-utility.cxx
+++ b/bpkg/manifest-utility.cxx
@@ -3,10 +3,11 @@
#include <bpkg/manifest-utility.hxx>
+#include <sstream>
#include <cstring> // strcspn()
-#include <libbutl/b.mxx>
-#include <libbutl/sha256.mxx>
+#include <libbutl/b.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator
#include <bpkg/package.hxx> // wildcard_version
#include <bpkg/diagnostics.hxx>
@@ -22,6 +23,45 @@ namespace bpkg
const path signature_file ("signature.manifest");
const path manifest_file ("manifest");
+ vector<package_info>
+ package_b_info (const common_options& o,
+ const dir_paths& ds,
+ b_info_flags fl)
+ {
+ path b (name_b (o));
+
+ vector<package_info> r;
+ try
+ {
+ b_info (r,
+ ds,
+ fl,
+ verb,
+ [] (const char* const args[], size_t n)
+ {
+ if (verb >= 2)
+ print_process (args, n);
+ },
+ b,
+ exec_dir,
+ o.build_option ());
+ return r;
+ }
+ catch (const b_error& e)
+ {
+ if (e.normal ())
+ throw failed (); // Assume the build2 process issued diagnostics.
+
+ diag_record dr (fail);
+ dr << "unable to parse project ";
+ if (r.size () < ds.size ()) dr << ds[r.size ()] << ' ';
+ dr << "info: " << e <<
+ info << "produced by '" << b << "'; use --build to override" << endf;
+
+ return vector<package_info> (); // Work around GCC 13.2.1 segfault.
+ }
+ }
+
package_scheme
parse_package_scheme (const char*& s)
{
@@ -40,36 +80,21 @@ namespace bpkg
package_name
parse_package_name (const char* s, bool allow_version)
{
- if (!allow_version)
- try
- {
- return package_name (s);
- }
- catch (const invalid_argument& e)
- {
- fail << "invalid package name '" << s << "': " << e;
- }
-
- // Calculate the package name length as a length of the prefix that
- // doesn't contain spaces, slashes and the version constraint starting
- // characters. Note that none of them are valid package name characters.
- //
- size_t n (strcspn (s, " /=<>([~^"));
-
try
{
- return package_name (string (s, n));
+ return extract_package_name (s, allow_version);
}
catch (const invalid_argument& e)
{
- fail << "invalid package name in '" << s << "': " << e << endf;
+ fail << "invalid package name " << (allow_version ? "in " : "")
+ << "'" << s << "': " << e << endf;
}
}
version
parse_package_version (const char* s,
bool allow_wildcard,
- bool fold_zero_revision)
+ version::flags fl)
{
using traits = string::traits_type;
@@ -83,15 +108,7 @@ namespace bpkg
try
{
- version r (p, fold_zero_revision);
-
- if (r.release && r.release->empty ())
- throw invalid_argument ("earliest version");
-
- if (r.compare (wildcard_version, true /* ignore_revision */) == 0)
- throw invalid_argument ("stub version");
-
- return r;
+ return extract_package_version (s, fl);
}
catch (const invalid_argument& e)
{
@@ -106,7 +123,7 @@ namespace bpkg
optional<version_constraint>
parse_package_version_constraint (const char* s,
bool allow_wildcard,
- bool fold_zero_revision,
+ version::flags fl,
bool version_only)
{
// Calculate the version specification position as a length of the prefix
@@ -118,16 +135,20 @@ namespace bpkg
if (s[n] == '\0') // No version (constraint) is specified?
return nullopt;
- const char* v (s + n); // Constraint or version including '/'.
+ const char* v (s + n); // Constraint or version including leading '/'.
+
+ if (version_only && v[0] != '/')
+ fail << "exact package version expected instead of version constraint "
+ << "in '" << s << "'";
- // If only the version is allowed or the package name is followed by '/'
- // then fallback to the version parsing.
+ // If the package name is followed by '/' then fallback to the version
+ // parsing.
//
- if (version_only || v[0] == '/')
+ if (v[0] == '/')
try
{
return version_constraint (
- parse_package_version (s, allow_wildcard, fold_zero_revision));
+ parse_package_version (s, allow_wildcard, fl));
}
catch (const invalid_argument& e)
{
@@ -295,43 +316,344 @@ namespace bpkg
}
}
- optional<version>
- package_version (const common_options& o, const dir_path& d)
+ package_version_infos
+ package_versions (const common_options& o,
+ const dir_paths& ds,
+ b_info_flags fl)
{
- path b (name_b (o));
+ vector<b_project_info> pis (package_b_info (o, ds, fl));
- try
- {
- b_project_info pi (
- b_info (d,
- false /* ext_mods */,
- verb,
- [] (const char* const args[], size_t n)
- {
- if (verb >= 2)
- print_process (args, n);
- },
- b,
- exec_dir,
- o.build_option ()));
-
- optional<version> r;
+ package_version_infos r;
+ r.reserve (pis.size ());
+ for (const b_project_info& pi: pis)
+ {
// An empty version indicates that the version module is not enabled for
// the project.
//
- if (!pi.version.empty ())
- r = version (pi.version.string ());
+ optional<version> v (!pi.version.empty ()
+ ? version (pi.version.string ())
+ : optional<version> ());
- return r;
+ r.push_back (package_version_info {move (v), move (pi)});
}
- catch (const b_error& e)
+
+ return r;
+ }
+
+ string
+ package_checksum (const common_options& o,
+ const dir_path& d,
+ const package_info* pi)
+ {
+ path f (d / manifest_file);
+
+ try
{
- if (e.normal ())
- throw failed (); // Assume the build2 process issued diagnostics.
+ ifdstream is (f, fdopen_mode::binary);
+ sha256 cs (is);
- fail << "unable to parse project " << d << " info: " << e <<
- info << "produced by '" << b << "'; use --build to override" << endf;
+ const vector<package_info::subproject>& sps (
+ pi != nullptr
+ ? pi->subprojects
+ : package_b_info (o, d, b_info_flags::subprojects).subprojects);
+
+ for (const package_info::subproject& sp: sps)
+ cs.append (sp.path.string ());
+
+ return cs.string ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e << endf;
+ }
+ }
+
+ // Return the sorted list of *.build files (first) which are present in the
+ // package's build/config/ subdirectory (or their alternatives) together
+ // with the *-build manifest value names they correspond to (second). Skip
+ // files which are already present in the specified buildfile/path
+ // lists. Note: throws system_error on filesystem errors.
+ //
+ static vector<pair<path, path>>
+ find_buildfiles (const dir_path& config,
+ const string& ext,
+ const vector<buildfile>& bs,
+ const vector<path>& bps)
+ {
+ vector<pair<path, path>> r;
+
+ for (const dir_entry& de: dir_iterator (config, dir_iterator::no_follow))
+ {
+ if (de.type () == entry_type::regular)
+ {
+ const path& p (de.path ());
+ const char* e (p.extension_cstring ());
+
+ if (e != nullptr && ext == e)
+ {
+ path f (config.leaf () / p.base ()); // Relative to build/.
+
+ if (find_if (bs.begin (), bs.end (),
+ [&f] (const auto& v) {return v.path == f;}) ==
+ bs.end () &&
+ find (bps.begin (), bps.end (), f) == bps.end ())
+ {
+ r.emplace_back (config / p, move (f));
+ }
+ }
+ }
+ }
+
+ sort (r.begin (), r.end (),
+ [] (const auto& x, const auto& y) {return x.second < y.second;});
+
+ return r;
+ }
+
+ string
+ package_buildfiles_checksum (const optional<string>& bb,
+ const optional<string>& rb,
+ const vector<buildfile>& bs,
+ const dir_path& d,
+ const vector<path>& bps,
+ optional<bool> an)
+ {
+ if (d.empty ())
+ {
+ assert (bb);
+
+ sha256 cs (*bb);
+
+ if (rb)
+ cs.append (*rb);
+
+ for (const buildfile& b: bs)
+ cs.append (b.content);
+
+ return cs.string ();
+ }
+
+ auto checksum = [&bb, &rb, &bs, &bps] (const path& b,
+ const path& r,
+ const dir_path& c,
+ const string& e)
+ {
+ sha256 cs;
+
+ auto append_file = [&cs] (const path& f)
+ {
+ try
+ {
+ // Open the buildfile in the text mode and hash the NULL character
+ // at the end to calculate the checksum over files consistently with
+ // calculating it over the *-build manifest values.
+ //
+ ifdstream ifs (f);
+ cs.append (ifs);
+ cs.append ('\0');
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e;
+ }
+ };
+
+ if (bb)
+ cs.append (*bb);
+ else
+ append_file (b);
+
+ bool root (true);
+
+ if (rb)
+ cs.append (*rb);
+ else if (exists (r))
+ append_file (r);
+ else
+ root = false;
+
+ for (const buildfile& b: bs)
+ cs.append (b.content);
+
+ if (!bps.empty ())
+ {
+ dir_path bd (b.directory ());
+
+ for (const path& p: bps)
+ {
+ path f (bd / p);
+ f += '.' + e;
+
+ append_file (f);
+ }
+ }
+
+ if (root && exists (c))
+ try
+ {
+ for (auto& f: find_buildfiles (c, e, bs, bps))
+ append_file (f.first);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to scan directory " << c << ": " << e;
+ }
+
+ return string (cs.string ());
+ };
+
+ // Verify that the deduced naming scheme matches the specified one and
+ // fail if that's not the case.
+ //
+ auto verify = [an, &d] (bool alt_naming)
+ {
+ assert (an);
+
+ if (*an != alt_naming)
+ fail << "buildfile naming scheme mismatch between manifest and "
+ << "package directory " << d;
+ };
+
+ // Check the alternative bootstrap file first since it is more specific.
+ //
+ path bf;
+ if (exists (bf = d / alt_bootstrap_file))
+ {
+ if (an)
+ verify (true /* alt_naming */);
+
+ return checksum (bf,
+ d / alt_root_file,
+ d / alt_config_dir,
+ alt_build_ext);
+ }
+ else if (exists (bf = d / std_bootstrap_file))
+ {
+ if (an)
+ verify (false /* alt_naming */);
+
+ return checksum (bf,
+ d / std_root_file,
+ d / std_config_dir,
+ std_build_ext);
+ }
+ else
+ fail << "unable to find bootstrap.build file in package directory "
+ << d << endf;
+ }
+
+ void
+ load_package_buildfiles (package_manifest& m, const dir_path& d, bool erp)
+ {
+ assert (m.buildfile_paths.empty ()); // build-file values must be expanded.
+
+ auto load_buildfiles = [&m, &d, erp] (const path& b,
+ const path& r,
+ const dir_path& c,
+ const string& ext)
+ {
+ auto diag_path = [&d, erp] (const path& p)
+ {
+ return !erp ? p : p.leaf (d);
+ };
+
+ auto load = [&diag_path] (const path& f)
+ {
+ try
+ {
+ ifdstream ifs (f);
+ string r (ifs.read_text ());
+ ifs.close ();
+ return r;
+ }
+ catch (const io_error& e)
+ {
+ // Sanitize the exception description.
+ //
+ ostringstream os;
+ os << "unable to read from " << diag_path (f) << ": " << e;
+ throw runtime_error (os.str ());
+ }
+ };
+
+ if (!m.bootstrap_build)
+ m.bootstrap_build = load (b);
+
+ if (!m.root_build && exists (r))
+ m.root_build = load (r);
+
+ if (m.root_build && exists (c))
+ try
+ {
+ for (auto& f: find_buildfiles (c,
+ ext,
+ m.buildfiles,
+ m.buildfile_paths))
+ {
+ m.buildfiles.emplace_back (move (f.second), load (f.first));
+ }
+ }
+ catch (const system_error& e)
+ {
+ // Sanitize the exception description.
+ //
+ ostringstream os;
+ os << "unable to scan directory " << diag_path (c) << ": " << e;
+ throw runtime_error (os.str ());
+ }
+ };
+
+ // Set the manifest's alt_naming flag to the deduced value if absent and
+ // verify that it matches otherwise.
+ //
+ auto alt_naming = [&m, &d, erp] (bool v)
+ {
+ if (!m.alt_naming)
+ {
+ m.alt_naming = v;
+ }
+ else if (*m.alt_naming != v)
+ {
+ string e ("buildfile naming scheme mismatch between manifest and "
+ "package directory");
+
+ if (!erp)
+ e += ' ' + d.string ();
+
+ throw runtime_error (e);
+ }
+ };
+
+ // Check the alternative bootstrap file first since it is more specific.
+ //
+ path bf;
+ if (exists (bf = d / alt_bootstrap_file))
+ {
+ alt_naming (true);
+
+ load_buildfiles (bf,
+ d / alt_root_file,
+ d / alt_config_dir,
+ alt_build_ext);
+ }
+ else if (exists (bf = d / std_bootstrap_file))
+ {
+ alt_naming (false);
+
+ load_buildfiles (bf,
+ d / std_root_file,
+ d / std_config_dir,
+ std_build_ext);
+ }
+ else
+ {
+ string e ("unable to find bootstrap.build file in package directory");
+
+ if (!erp)
+ e += ' ' + d.string ();
+
+ throw runtime_error (e);
}
}
}
diff --git a/bpkg/manifest-utility.hxx b/bpkg/manifest-utility.hxx
index 29d548d..a5ea962 100644
--- a/bpkg/manifest-utility.hxx
+++ b/bpkg/manifest-utility.hxx
@@ -7,6 +7,8 @@
#include <libbpkg/manifest.hxx>
#include <libbpkg/package-name.hxx>
+#include <libbutl/b.hxx> // b_info_flags
+
#include <bpkg/types.hxx>
#include <bpkg/utility.hxx>
@@ -17,6 +19,22 @@ namespace bpkg
extern const path signature_file; // signature.manifest
extern const path manifest_file; // manifest
+ using butl::b_info_flags;
+
+ // Obtain build2 projects info for package source or output directories.
+ //
+ vector<package_info>
+ package_b_info (const common_options&, const dir_paths&, b_info_flags);
+
+ // As above but return the info for a single package directory.
+ //
+ inline package_info
+ package_b_info (const common_options& o, const dir_path& d, b_info_flags fl)
+ {
+ vector<package_info> r (package_b_info (o, dir_paths ({d}), fl));
+ return move (r[0]);
+ }
+
// Package naming schemes.
//
enum class package_scheme
@@ -48,16 +66,14 @@ namespace bpkg
version
parse_package_version (const char*,
bool allow_wildcard = false,
- bool fold_zero_revision = true);
+ version::flags fl = version::fold_zero_revision);
inline version
parse_package_version (const string& s,
bool allow_wildcard = false,
- bool fold_zero_revision = true)
+ version::flags fl = version::fold_zero_revision)
{
- return parse_package_version (s.c_str (),
- allow_wildcard,
- fold_zero_revision);
+ return parse_package_version (s.c_str (), allow_wildcard, fl);
}
// Extract the package constraint from either <name>[/<version>] or
@@ -66,10 +82,11 @@ namespace bpkg
// the package name is specified.
//
optional<version_constraint>
- parse_package_version_constraint (const char*,
- bool allow_wildcard = false,
- bool fold_zero_revision = true,
- bool version_only = false);
+ parse_package_version_constraint (
+ const char*,
+ bool allow_wildcard = false,
+ version::flags = version::fold_zero_revision,
+ bool version_only = false);
// If the passed location is a relative local path, then assume this is a
// relative path to the repository directory and complete it based on the
@@ -95,20 +112,82 @@ namespace bpkg
bool
repository_name (const string&);
- // Return the version of a package as provided by the build2 version module.
- // Return nullopt if the version module is disabled for the package (or the
- // build2 project directory doesn't contain the manifest file). Fail if the
- // directory is not a build2 project.
+ // Return the versions of packages as provided by the build2 version module
+ // together with the build2 project info the versions originate from (in
+ // case the caller may want to reuse it). Return nullopt as a package
+ // version if the version module is disabled for the package (or the build2
+ // project directory doesn't contain the manifest file). Fail if any of the
+ // specified directories is not a build2 project.
//
- // Note that if the package directory is under the version control, then the
+ // Note that if a package directory is under the version control, then the
// resulting version may be populated with the snapshot information (see
- // libbutl/standard-version.mxx for more details). Thus, this function can
- // be used for fixing up the package manifest version.
+ // libbutl/standard-version.hxx for more details). Thus, this function can
+ // be used for fixing up the package manifest versions.
//
class common_options;
- optional<version>
- package_version (const common_options&, const dir_path&);
+ struct package_version_info
+ {
+ optional<bpkg::version> version;
+ package_info info;
+ };
+
+ using package_version_infos = vector<package_version_info>;
+
+ package_version_infos
+ package_versions (const common_options&, const dir_paths&, b_info_flags);
+
+ // As above but return the version of a single package.
+ //
+ inline package_version_info
+ package_version (const common_options& o, const dir_path& d, b_info_flags fl)
+ {
+ package_version_infos r (package_versions (o, dir_paths ({d}), fl));
+ return move (r[0]);
+ }
+
+ // Calculate the checksum of the manifest file located in the package source
+ // directory and the subproject set (see package::manifest_checksum).
+ //
+ // Pass the build2 project info for the package, if available, to speed up
+ // the call and NULL otherwise (in which case it will be queried by the
+ // implementation). In the former case it is assumed that the package info
+ // has been retrieved with the b_info_flags::subprojects flag.
+ //
+ string
+ package_checksum (const common_options&,
+ const dir_path& src_dir,
+ const package_info*);
+
+ // Calculate the checksum of the buildfiles using the *-build manifest
+ // values and, if the package source directory is specified (not empty),
+ // build-file values. If the package source directory is specified, then
+ // also use the files it contains for unspecified values. If additionally
+ // the alt_naming flag is specified, then verify the package's buildfile
+ // naming scheme against its value and fail on mismatch.
+ //
+ string
+ package_buildfiles_checksum (const optional<string>& bootstrap_build,
+ const optional<string>& root_build,
+ const vector<buildfile>& buildfiles,
+ const dir_path& src_dir = {},
+ const vector<path>& buildfile_paths = {},
+ optional<bool> alt_naming = nullopt);
+
+ // Load the package's buildfiles for unspecified manifest values. Throw
+ // std::runtime_error for underlying errors (unable to find bootstrap.build,
+ // unable to read from file, etc). Optionally convert paths used in the
+ // potential error description to be relative to the package source
+ // directory.
+ //
+ // Note that before calling this function you need to expand the build-file
+ // manifest values into the respective *-build values, for example, by
+ // calling manifest::load_files().
+ //
+ void
+ load_package_buildfiles (package_manifest&,
+ const dir_path& src_dir,
+ bool err_path_relative = false);
}
#endif // BPKG_MANIFEST_UTILITY_HXX
diff --git a/bpkg/odb.sh b/bpkg/odb.sh
index 5cd8e02..75c6d2d 100755
--- a/bpkg/odb.sh
+++ b/bpkg/odb.sh
@@ -43,11 +43,14 @@ fi
$odb "${inc[@]}" \
-DLIBODB_BUILD2 -DLIBODB_SQLITE_BUILD2 --generate-schema \
-d sqlite --std c++14 --generate-query \
- --odb-epilogue '#include <libbutl/small-vector-odb.hxx>' \
+ --odb-epilogue '#include <libbutl/small-vector-odb.hxx>' \
+ --odb-epilogue '#include <bpkg/pointer-traits.hxx>' \
--odb-epilogue '#include <bpkg/wrapper-traits.hxx>' \
- --hxx-prologue '#include <libbutl/small-vector-odb.hxx>' \
+ --hxx-prologue '#include <libbutl/small-vector-odb.hxx>' \
+ --hxx-prologue '#include <bpkg/pointer-traits.hxx>' \
--hxx-prologue '#include <bpkg/wrapper-traits.hxx>' \
--hxx-prologue '#include <bpkg/value-traits.hxx>' \
- --include-with-brackets --include-prefix bpkg --guard-prefix BPKG \
- --schema main --schema-version-table main.schema_version \
+ --generate-prepared --include-with-brackets --include-prefix bpkg \
+ --guard-prefix BPKG --schema main \
+ --schema-version-table main.schema_version \
--sqlite-override-null package.hxx
diff --git a/bpkg/options-types.hxx b/bpkg/options-types.hxx
index 876f0a5..30d52a0 100644
--- a/bpkg/options-types.hxx
+++ b/bpkg/options-types.hxx
@@ -8,8 +8,14 @@
#include <cassert>
#include <utility> // move()
+#include <libbutl/prefix-map.hxx>
+
+#include <bpkg/types.hxx>
+
namespace bpkg
{
+ using uuid_type = uuid;
+
enum class auth
{
none,
@@ -17,6 +23,23 @@ namespace bpkg
all
};
+ enum class stdout_format
+ {
+ lines,
+ json
+ };
+
+ enum class git_protocol_capabilities
+ {
+ dumb, // No shallow clone support.
+ smart, // Support for shallow clone, but not for unadvertised refs fetch.
+ unadv // Support for shallow clone and for unadvertised refs fetch.
+ };
+
+ using git_capabilities_map = butl::prefix_map<string,
+ git_protocol_capabilities,
+ '/'>;
+
// Qualified options.
//
// An option that uses this type can have its values qualified using the
@@ -69,7 +92,7 @@ namespace bpkg
}
};
- extern const char* openssl_commands[3]; // Clang bug requres explicit size.
+ extern const char* openssl_commands[5]; // Clang bug requires explicit size.
}
#endif // BPKG_OPTIONS_TYPES_HXX
diff --git a/bpkg/package-configuration.cxx b/bpkg/package-configuration.cxx
new file mode 100644
index 0000000..483e3e0
--- /dev/null
+++ b/bpkg/package-configuration.cxx
@@ -0,0 +1,516 @@
+// file : bpkg/package-configuration.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/package-configuration.hxx>
+
+#include <sstream>
+
+#include <bpkg/package-skeleton.hxx>
+
+namespace bpkg
+{
+ using build2::config::variable_origin;
+
+ string
+ serialize_cmdline (const string& name, const optional<build2::names>& value)
+ {
+ using namespace build2;
+
+ string r (name + '=');
+
+ if (!value)
+ r += "[null]";
+ else
+ {
+ if (!value->empty ())
+ {
+ // Note: we need to use command-line (effective) quoting.
+ //
+ ostringstream os;
+ to_stream (os, *value, quote_mode::effective, '@');
+ r += os.str ();
+ }
+ }
+
+ return r;
+ }
+
+ void package_configuration::
+ print (diag_record& dr,
+ const char* indent,
+ const dependent_config_variable_values* ovrs) const
+ {
+ bool first (true);
+ for (const config_variable_value& v: *this)
+ {
+ if (v.origin != variable_origin::buildfile &&
+ v.origin != variable_origin::override_)
+ continue;
+
+ if (first)
+ first = false;
+ else
+ dr << '\n';
+
+ dr << indent;
+
+ if (ovrs != nullptr && v.origin == variable_origin::buildfile)
+ {
+ if (const dependent_config_variable_value* ov = ovrs->find (v.name))
+ {
+ dr << ov->serialize_cmdline () << " (set by " << ov->dependent << ')';
+ continue;
+ }
+ }
+
+ dr << v.serialize_cmdline () << " (";
+
+ if (v.origin == variable_origin::buildfile)
+ dr << "set by " << *v.dependent;
+ else
+ dr << "user configuration";
+
+ dr << ')';
+ }
+ }
+
+ void
+ to_checksum (sha256& cs, const config_variable_value& v)
+ {
+ using namespace build2;
+
+ cs.append (v.name);
+ cs.append (static_cast<uint8_t> (v.origin));
+ if (v.type)
+ cs.append (*v.type);
+
+ if (v.origin != variable_origin::undefined)
+ {
+ if (v.value)
+ for (const name& n: *v.value)
+ to_checksum (cs, n);
+
+ if (v.origin == variable_origin::buildfile)
+ {
+ cs.append (v.dependent->string ());
+ cs.append (v.confirmed);
+ }
+ }
+ }
+
+ optional<bool>
+ negotiate_configuration (
+ package_configurations& cfgs,
+ package_skeleton& dept,
+ pair<size_t, size_t> pos,
+ const small_vector<reference_wrapper<package_skeleton>, 1>& depcs,
+ bool has_alt)
+ {
+ assert (!dept.system);
+
+ pos.first--; pos.second--; // Convert to 0-base.
+
+ const dependency_alternative& da (
+ dept.available->dependencies[pos.first][pos.second]);
+
+ assert (da.require || da.prefer);
+
+ // Step 1: save a snapshot of the old configuration while unsetting values
+ // that have this dependent as the originator and reloading the defaults.
+ //
+ // The idea behind unsetting values previously (first) set by this
+ // dependent is to allow it to "change its mind" based on other changes in
+ // the configuration (e.g., some expensive feature got enabled by another
+ // dependent which this dependent might as well use).
+ //
+ // This works well if the default values of configuration variables are
+ // independent. However, consider this example:
+ //
+ // dependency:
+ //
+ // config [bool] config.foo.x ?= false
+ // config [bool] config.foo.buf ?= ($config.foo.x ? 8196 : 4096)
+ //
+ // dependent:
+ //
+ // config.foo.x = true
+ // config.foo.buf = ($config.foo.buf < 6144 ? 6144 : $config.foo.buf)
+ //
+ // Here if we unset both x and buf to their defaults, we will get an
+ // incorrect result.
+ //
+ // The long-term solution here is to track dependencies between
+ // configuration variables (which we can do as part of the config
+ // directive via our parser::lookup_variable() hook and save this
+ // information in the config module's saved_variables list). Then, we
+ // "levelize" all the variables and have an inner "refinement" loop over
+ // these levels. Specifically, we first unset all of them. Then we unset
+ // all except the first level (which contains configuration variables that
+ // don't depend on any others). And so on.
+ //
+ // And until we implement this, we expect the dependent to take such
+ // configuration variable dependencies into account. For example:
+ //
+ // config.foo.x = true
+ // config.foo.buf = ($config.foo.buf < 6144
+ // ? ($config.foo.x ? 8196 : 6144)
+ // : $config.foo.buf)
+ //
+ // Another issue with this "originating dependent" logic is that it will
+ // be tricky to scale to containers where we would need to track
+ // originating dependents for individual elements of a value rather than
+ // the whole value as we do now. As an example, consider some "set of
+ // enabled backends" configuration variable. Technically, this doesn't
+ // seem insurmountable if we make some assumptions (e.g., if a value
+ // contains multiple elements, then it is such a set-like value; or
+ // we could use actual type names).
+ //
+ // For now we recommend to use multiple bool configurations to handle such
+ // cases (and, in fact, we currently don't have any set/map-like types,
+ // which we may want to add in the future). However, one could come up
+ // with some open-ended configuration list that will be difficult to
+ // support with bool. For example, we may need to support a set of buffer
+ // sizes or some such.
+ //
+ // Our assumptions regarding require:
+ //
+ // - Can only set bool configuration variables and only to true.
+ //
+ // - Should not have any conditions on the state of other configuration
+ // variables, including their origin (but can have other conditions,
+ // for example on the target platform).
+ //
+ // This means that we don't need to set the default values, but will need
+ // the type information as well as overrides. So what we will do is only
+ // call reload_defaults() for the first time to load types/override. Note
+ // that this assumes the set of configuration variables cannot change
+ // based on the values of other configuration variables (we have a note
+ // in the manual instructing the user not to do this).
+ //
+ // The dependency could also be system in which case there could be no
+ // skeleton information to load the types/defaults from. In this case we
+ // can handle require in the "lax mode" (based on the above assumptions)
+ // but not prefer.
+ //
+ // While at it, also collect the configurations to pass to dependent's
+ // evaluate_*() calls.
+ //
+ dependent_config_variable_values old_cfgs;
+ package_skeleton::dependency_configurations depc_cfgs;
+ depc_cfgs.reserve (depcs.size ());
+
+ for (package_skeleton& depc: depcs)
+ {
+ package_configuration& cfg (cfgs[depc.package]);
+
+ for (config_variable_value& v: cfg)
+ {
+ if (v.origin == variable_origin::buildfile)
+ {
+ if (*v.dependent == dept.package)
+ {
+ old_cfgs.push_back (
+ dependent_config_variable_value {
+ v.name, move (v.value), move (*v.dependent), v.has_alternative});
+
+ // Note that we will not reload it to default in case of require.
+ //
+ v.undefine ();
+ }
+ else
+ old_cfgs.push_back (
+ dependent_config_variable_value {
+ v.name, v.value, *v.dependent, v.has_alternative});
+ }
+ }
+
+ if (depc.available == nullptr)
+ {
+ assert (depc.system);
+
+ if (da.prefer)
+ fail << "unable to negotiate configuration for system dependency "
+ << depc.package << " without configuration information" <<
+ info << "consider specifying system dependency version that has "
+ << "corresponding available package" <<
+ info << "dependent " << dept.package << " has prefer/accept clauses "
+ << "that cannot be evaluated without configuration information";
+
+ if (!cfg.system)
+ {
+ // Note that we still need the overrides.
+ //
+ depc.load_overrides (cfg);
+ cfg.system = true;
+ }
+
+ continue;
+ }
+ else
+ assert (!cfg.system);
+
+ if (da.prefer || cfg.empty ())
+ depc.reload_defaults (cfg);
+ }
+
+ // Note that we need to collect the dependency configurations as a
+ // separate loop so that the stored references are not invalidated by
+ // operator[] (which is really a push_back() into a vector).
+ //
+ for (package_skeleton& depc: depcs)
+ depc_cfgs.push_back (cfgs[depc.package]);
+
+ // Step 2: execute the prefer/accept or requires clauses.
+ //
+ if (!(da.require
+ ? dept.evaluate_require (depc_cfgs, *da.require, pos, has_alt)
+ : dept.evaluate_prefer_accept (depc_cfgs,
+ *da.prefer, *da.accept, pos,
+ has_alt)))
+ {
+ if (has_alt)
+ return nullopt;
+
+ diag_record dr (fail);
+
+ dr << "unable to negotiate acceptable configuration with dependent "
+ << dept.package << " for dependencies ";
+
+ for (size_t i (0); i != depcs.size (); ++i)
+ dr << (i == 0 ? "" : ", ") << depcs[i].get ().package;
+
+ dr << info << "configuration before negotiation:\n";
+
+ // Note that we won't print this dependent's values (which we have unset
+ // above), but that seems like a good thing since they are not the cause
+ // of this impasse.
+ //
+ for (const package_configuration& cfg: depc_cfgs)
+ cfg.print (dr, " "); // Note 4 spaces since in nested info.
+ }
+
+ // Check if anything changed by comparing to entries in old_cfgs.
+ //
+ // While at it, also detect if we have any changes where one dependent
+ // overrides a value set by another dependent (see below).
+ //
+ bool cycle (false);
+ {
+ optional<size_t> n (0); // Number of unchanged.
+
+ for (package_skeleton& depc: depcs)
+ {
+ package_configuration& cfg (cfgs[depc.package]);
+
+ for (config_variable_value& v: cfg)
+ {
+ if (v.origin == variable_origin::buildfile)
+ {
+ if (const dependent_config_variable_value* ov =
+ old_cfgs.find (v.name))
+ {
+ if (ov->value == v.value)
+ {
+ // If the value hasn't changed, so shouldn't the originating
+ // dependent.
+ //
+ assert (ov->dependent == *v.dependent);
+
+ if (n)
+ ++*n;
+
+ continue;
+ }
+ else
+ {
+ // Note that it's possible the same dependent overrides its
+ // old value (e.g., because a conditional default changed to a
+ // better value).
+ //
+ if (ov->dependent != *v.dependent)
+ cycle = true;
+ }
+ }
+
+ n = nullopt;
+
+ if (cycle)
+ break;
+ }
+ }
+
+ if (!n && cycle)
+ break;
+ }
+
+ // If we haven't seen any changes and we've seen the same number, then
+ // nothing has changed.
+ //
+ if (n && *n == old_cfgs.size ())
+ return false;
+ }
+
+ // Besides the dependent returning false from its accept clause, there is
+ // another manifestation of the inability to negotiate an acceptable
+ // configuration: two dependents keep changing the same configuration to
+ // mutually unacceptable values. To detect this, we need to look for
+ // negotiation cycles.
+ //
+ // Specifically, given a linear change history in the form:
+ //
+ // O->N ... O->N ... O->N
+ //
+ // We need to look for a possibility of turning it into a cycle:
+ //
+ // O->N ... O->N
+ // \ ... /
+ //
+ // Where O->N is a change that involves one dependent overriding a value
+ // set by another dependent and `...` are identical history segments.
+ //
+ if (!cycle)
+ return true;
+
+ // Populate new_cfgs.
+ //
+ dependent_config_variable_values new_cfgs;
+ for (package_skeleton& depc: depcs)
+ {
+ package_configuration& cfg (cfgs[depc.package]);
+
+ for (config_variable_value& v: cfg)
+ {
+ if (v.origin == variable_origin::buildfile)
+ {
+ new_cfgs.push_back (
+ dependent_config_variable_value {
+ v.name, v.value, *v.dependent, v.has_alternative});
+ }
+ }
+ }
+
+ // Sort both.
+ //
+ {
+ auto cmp = [] (const dependent_config_variable_value& x,
+ const dependent_config_variable_value& y)
+ {
+ return x.name < y.name;
+ };
+
+ sort (old_cfgs.begin (), old_cfgs.end (), cmp);
+ sort (new_cfgs.begin (), new_cfgs.end (), cmp);
+ }
+
+ // Look backwards for identical O->N changes and see if we can come
+ // up with two identical segments between them.
+ //
+ cycle = false;
+
+ auto& change_history (cfgs.change_history_);
+
+ for (size_t n (change_history.size ()), i (n); i != 0; i -= 2)
+ {
+ if (change_history[i - 2] == old_cfgs &&
+ change_history[i - 1] == new_cfgs)
+ {
+ size_t d (n - i); // Segment length.
+
+ // See if there is an identical segment before this that also starts
+ // with O->N.
+ //
+ if (i < 2 + d + 2)
+ break; // Not long enough to possibly find anything.
+
+ size_t j (i - 2 - d); // Earlier O->N.
+
+ if (change_history[j - 2] == old_cfgs &&
+ change_history[j - 1] == new_cfgs)
+ {
+ if (equal (change_history.begin () + j,
+ change_history.begin () + j + d,
+ change_history.begin () + i))
+ {
+ cycle = true;
+ break;
+ }
+ }
+
+ // Otherwise, keep looking for a potentially longer segment.
+ }
+ }
+
+ if (!cycle)
+ {
+ change_history.push_back (move (old_cfgs));
+ change_history.push_back (move (new_cfgs));
+
+ return true;
+ }
+
+ if (has_alt)
+ return nullopt;
+
+ // Analyze the O->N changes and determine the problematic dependent(s).
+ // Do we actually know for sure they are all problematic? Well, they
+ // repeatedly changed the values to the ones we don't like, so I guess so.
+ //
+ // If it's the other dependent that has an alternative, then we let the
+ // negotiation continue for one more half-cycle at which point it will be
+ // while negotiating the configuration of the other dependent that we will
+ // (again) detect this cycle.
+ //
+ small_vector<reference_wrapper<const package_key>, 1> depts;
+ for (const dependent_config_variable_value& nv: new_cfgs)
+ {
+ if (nv.dependent == dept.package)
+ {
+ if (const dependent_config_variable_value* ov = old_cfgs.find (nv.name))
+ {
+ if (ov->value != nv.value && ov->dependent != nv.dependent)
+ {
+ if (find_if (depts.begin (), depts.end (),
+ [ov] (reference_wrapper<const package_key> pk)
+ {
+ return ov->dependent == pk.get ();
+ }) == depts.end ())
+ {
+ if (ov->has_alternative)
+ {
+ change_history.push_back (move (old_cfgs));
+ change_history.push_back (move (new_cfgs));
+
+ return true;
+ }
+
+ depts.push_back (ov->dependent);
+ }
+ }
+ }
+ }
+ }
+
+ diag_record dr (fail);
+
+ dr << "unable to negotiate acceptable configuration between dependents "
+ << dept.package;
+
+ for (const package_key& d: depts)
+ dr << ", " << d;
+
+ dr << " for dependencies ";
+
+ for (size_t i (0); i != depcs.size (); ++i)
+ dr << (i == 0 ? "" : ", ") << depcs[i].get ().package;
+
+ dr << info << "configuration before negotiation:\n";
+ for (const package_configuration& cfg: depc_cfgs)
+ cfg.print (dr, " ", &old_cfgs);
+
+ dr << info << "configuration after negotiation:\n";
+ for (const package_configuration& cfg: depc_cfgs)
+ cfg.print (dr, " ");
+
+ dr << endf;
+ }
+}
diff --git a/bpkg/package-configuration.hxx b/bpkg/package-configuration.hxx
new file mode 100644
index 0000000..30cbe0a
--- /dev/null
+++ b/bpkg/package-configuration.hxx
@@ -0,0 +1,223 @@
+// file : bpkg/package-configuration.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_PACKAGE_CONFIGURATION_HXX
+#define BPKG_PACKAGE_CONFIGURATION_HXX
+
+#include <libbuild2/types.hxx> // build2::names
+#include <libbuild2/config/types.hxx> // build2::config::variable_origin
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/package.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ class package_skeleton;
+
+ // Serialize the variable value as a command line override.
+ //
+ string
+ serialize_cmdline (const string& name, const optional<build2::names>& value);
+
+ struct config_variable_value
+ {
+ string name;
+
+ // The variable_origin values have the following meaning:
+ //
+ // default -- default value from the config directive
+ // buildfile -- dependent configuration (config_source::dependent)
+ // override -- user configuration (config_source::user)
+ // undefined -- none of the above
+ //
+ build2::config::variable_origin origin;
+
+ // Variable type name with absent signifying untyped.
+ //
+ optional<string> type;
+
+ // If origin is not undefined, then this is the reversed variable value
+ // with absent signifying NULL.
+ //
+ optional<build2::names> value;
+
+ // If origin is buildfile, then this is the "originating dependent" which
+ // first set this variable to this value.
+ //
+ optional<package_key> dependent;
+
+ // If origin is buildfile, then this flag indicates whether the
+ // originating dependent has been encountered during the negotiation
+ // retry.
+ //
+ bool confirmed;
+
+ // If origin is buildfile and the originating dependent has been
+ // encountered during the negotiation, then this flag indicates whether
+ // this dependent has another dependency alternative.
+ //
+ // @@ Strictly speaking this is a property of the dependent and
+ // duplicating it here for each variable is quite dirty (and requires
+ // us to drag this through skeleton calls). Doing this properly,
+ // however, will likely require another map with the dependent as a
+ // key. Maybe one day.
+ //
+ bool has_alternative;
+
+ public:
+ void
+ undefine ()
+ {
+ origin = build2::config::variable_origin::undefined;
+ value = nullopt;
+ dependent = nullopt;
+ confirmed = false;
+ has_alternative = false;
+ }
+
+ string
+ serialize_cmdline () const
+ {
+ return bpkg::serialize_cmdline (name, value);
+ }
+ };
+
+ void
+ to_checksum (sha256&, const config_variable_value&);
+
+ // A subset of config_variable_value for variable values set by the
+ // dependents (origin is buildfile). Used to track change history.
+ //
+ struct dependent_config_variable_value
+ {
+ string name;
+ optional<build2::names> value;
+ package_key dependent;
+ bool has_alternative;
+
+ public:
+ string
+ serialize_cmdline () const
+ {
+ return bpkg::serialize_cmdline (name, value);
+ }
+ };
+
+ inline bool
+ operator== (const dependent_config_variable_value& x,
+ const dependent_config_variable_value& y)
+ {
+ return x.name == y.name && x.value == y.value && x.dependent == y.dependent;
+ }
+
+ class dependent_config_variable_values:
+ public small_vector<dependent_config_variable_value, 1>
+ {
+ public:
+ const dependent_config_variable_value*
+ find (const string& name) const
+ {
+ auto i (find_if (begin (), end (),
+ [&name] (const dependent_config_variable_value& v)
+ {
+ return v.name == name;
+ }));
+ return i != end () ? &*i : nullptr;
+ }
+ };
+
+ class package_configuration: public vector<config_variable_value>
+ {
+ public:
+ package_key package;
+ bool system = false; // True if system package without skeleton info.
+
+ explicit
+ package_configuration (package_key p): package (move (p)) {}
+
+ config_variable_value*
+ find (const string& name)
+ {
+ auto i (find_if (begin (), end (),
+ [&name] (const config_variable_value& v)
+ {
+ return v.name == name;
+ }));
+ return i != end () ? &*i : nullptr;
+ }
+
+ const config_variable_value*
+ find (const string& name) const
+ {
+ auto i (find_if (begin (), end (),
+ [&name] (const config_variable_value& v)
+ {
+ return v.name == name;
+ }));
+ return i != end () ? &*i : nullptr;
+ }
+
+ // Print buildfile and override configuration variable values as command
+ // line overrides one per line with the specified indentation. After each
+ // variable also print in parenthesis its origin. If overrides is not
+ // NULL, then it is used to override the value/dependent information.
+ //
+ void
+ print (diag_record&, const char* indent,
+ const dependent_config_variable_values* overrides = nullptr) const;
+ };
+
+ class package_configurations: public small_vector<package_configuration, 1>
+ {
+ public:
+ // Note: may invalidate references.
+ //
+ package_configuration&
+ operator[] (const package_key& p)
+ {
+ auto i (find_if (begin (), end (),
+ [&p] (const package_configuration& pc)
+ {
+ return pc.package == p;
+ }));
+ if (i != end ())
+ return *i;
+
+ push_back (package_configuration (p));
+ return back ();
+ }
+
+ void
+ clear ()
+ {
+ small_vector<package_configuration, 1>::clear ();
+ change_history_.clear ();
+ }
+
+ // Implementation details.
+ //
+ public:
+ // Note: dependent_config_variable_values must be sorted by name.
+ //
+ small_vector<dependent_config_variable_values, 2> change_history_;
+ };
+
+ // Negotiate the configuration for the specified dependencies of the
+ // specified dependent. Return true if the configuration has changed.
+ // Return absent if has_alternative is true and no acceptable configuration
+ // could be negotiated.
+ //
+ optional<bool>
+ negotiate_configuration (
+ package_configurations&,
+ package_skeleton& dependent,
+ pair<size_t, size_t> position,
+ const small_vector<reference_wrapper<package_skeleton>, 1>& dependencies,
+ bool has_alternative);
+}
+
+#endif // BPKG_PACKAGE_CONFIGURATION_HXX
diff --git a/bpkg/package-query.cxx b/bpkg/package-query.cxx
new file mode 100644
index 0000000..a90cdef
--- /dev/null
+++ b/bpkg/package-query.cxx
@@ -0,0 +1,726 @@
+// file : bpkg/package-query.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/package-query.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/rep-mask.hxx>
+#include <bpkg/satisfaction.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+  // Search in the imaginary system repository.
+  //
+  vector<shared_ptr<available_package>> imaginary_stubs;
+
+  shared_ptr<available_package>
+  find_imaginary_stub (const package_name& name)
+  {
+    // Return the first stub with the matching name, if any.
+    //
+    for (const shared_ptr<available_package>& p: imaginary_stubs)
+    {
+      if (p->id.name == name)
+        return p;
+    }
+
+    return nullptr;
+  }
+
+  // Search in the existing packages registry.
+  //
+  vector<pair<reference_wrapper<database>,
+              shared_ptr<available_package>>> existing_packages;
+
+  pair<shared_ptr<available_package>, lazy_shared_ptr<repository_fragment>>
+  find_existing (database& db,
+                 const package_name& name,
+                 const optional<version_constraint>& c)
+  {
+    pair<shared_ptr<available_package>,
+         lazy_shared_ptr<repository_fragment>> r;
+
+    // Return the first registered package for this configuration with the
+    // matching name that satisfies the constraint, if specified.
+    //
+    auto i (find_if (existing_packages.begin (), existing_packages.end (),
+                     [&db, &name, &c] (const auto& p)
+                     {
+                       return p.first == db             &&
+                              p.second->id.name == name &&
+                              (!c || satisfies (p.second->version, *c));
+                     }));
+
+    if (i != existing_packages.end ())
+    {
+      r.first = i->second;
+      r.second = lazy_shared_ptr<repository_fragment> (db, empty_string);
+    }
+
+    return r;
+  }
+
+  // Search in real repositories.
+  //
+  linked_databases repo_configs;
+
+  linked_databases
+  dependent_repo_configs (database& db)
+  {
+    // Intersect the dependent configurations of this database with the
+    // configurations that contain the repository information.
+    //
+    linked_databases r;
+
+    const auto b (repo_configs.begin ());
+    const auto e (repo_configs.end ());
+
+    for (database& ddb: db.dependent_configs ())
+    {
+      if (find (b, e, ddb) != e)
+        r.push_back (ddb);
+    }
+
+    return r;
+  }
+
+  odb::result<available_package>
+  query_available (database& db,
+                   const package_name& name,
+                   const optional<version_constraint>& c,
+                   bool order,
+                   bool revision)
+  {
+    using query = query<available_package>;
+
+    query q (query::id.name == name);
+    const auto& vm (query::id.version);
+
+    // If there is a constraint, then translate it to the query. Otherwise,
+    // get the latest version or stub versions if present.
+    //
+    if (c)
+    {
+      assert (c->complete ());
+
+      // Stub versions (denoted by the wildcard version) satisfy any
+      // constraint (see the header for details), so always match them in
+      // addition to the constrained range. Note: compared ignoring both the
+      // revision and iteration.
+      //
+      query qs (compare_version_eq (vm,
+                                    canonical_version (wildcard_version),
+                                    false /* revision */,
+                                    false /* iteration */));
+
+      // Handle the exact version case (min == max) separately from the
+      // general range case below.
+      //
+      if (c->min_version &&
+          c->max_version &&
+          *c->min_version == *c->max_version)
+      {
+        const version& v (*c->min_version);
+
+        // Only compare revisions if the constraint version specifies one
+        // explicitly or the exact match is requested via the revision
+        // argument (see the header for the rationale).
+        //
+        q = q &&
+          (compare_version_eq (vm,
+                               canonical_version (v),
+                               revision || v.revision.has_value (),
+                               revision /* iteration */) ||
+           qs);
+      }
+      else
+      {
+        // Translate the lower and upper bounds, if any, into a conjunction.
+        //
+        query qr (true);
+
+        if (c->min_version)
+        {
+          const version& v (*c->min_version);
+          canonical_version cv (v);
+          bool rv (revision || v.revision);
+
+          if (c->min_open)
+            qr = compare_version_gt (vm, cv, rv, revision /* iteration */);
+          else
+            qr = compare_version_ge (vm, cv, rv, revision /* iteration */);
+        }
+
+        if (c->max_version)
+        {
+          const version& v (*c->max_version);
+          canonical_version cv (v);
+          bool rv (revision || v.revision);
+
+          if (c->max_open)
+            qr = qr && compare_version_lt (vm, cv, rv, revision);
+          else
+            qr = qr && compare_version_le (vm, cv, rv, revision);
+        }
+
+        q = q && (qr || qs);
+      }
+    }
+
+    // Unless suppressed, return the versions in the descending order.
+    //
+    if (order)
+      q += order_by_version_desc (vm);
+
+    return db.query<available_package> (q);
+  }
+
+  // Check if the package is available from the specified repository fragment,
+  // its prerequisite repositories, or one of their complements, recursively.
+  // Return the first repository fragment that contains the package or NULL if
+  // none are.
+  //
+  // Note that we can end up with a repository dependency cycle since the
+  // root repository can be the default complement for dir and git
+  // repositories (see rep_fetch() implementation for details). Thus we need
+  // to make sure that the repository fragment is not in the dependency chain
+  // yet.
+  //
+  using repository_fragments =
+    vector<reference_wrapper<const shared_ptr<repository_fragment>>>;
+
+  static shared_ptr<repository_fragment>
+  find (const shared_ptr<repository_fragment>& rf,
+        const shared_ptr<available_package>& ap,
+        repository_fragments& chain,
+        bool prereq)
+  {
+    // Prerequisites are not searched through recursively.
+    //
+    assert (!prereq || chain.empty ());
+
+    // Bail out if this fragment is already in the dependency chain (cycle).
+    //
+    if (find_if (chain.begin (), chain.end (),
+                 [&rf] (const shared_ptr<repository_fragment>& i) -> bool
+                 {
+                   return i == rf;
+                 }) != chain.end ())
+      return nullptr;
+
+    chain.emplace_back (rf);
+
+    // Pop the fragment from the chain on any return from this function,
+    // including via an exception.
+    //
+    unique_ptr<repository_fragments, void (*)(repository_fragments*)> deleter (
+      &chain, [] (repository_fragments* rf) {rf->pop_back ();});
+
+    const auto& cs (rf->complements);
+    const auto& ps (rf->prerequisites);
+
+    for (const package_location& pl: ap->locations)
+    {
+      const lazy_shared_ptr<repository_fragment>& lrf (pl.repository_fragment);
+
+      // Skip the locations in the masked repository fragments.
+      //
+      if (rep_masked_fragment (lrf))
+        continue;
+
+      // First check the repository itself.
+      //
+      // Note: compare the fragment names, without loading the lazy pointer.
+      //
+      if (lrf.object_id () == rf->name)
+        return rf;
+
+      // Then check all the complements and prerequisites repository fragments
+      // without loading them. Though, we still need to load complement and
+      // prerequisite repositories.
+      //
+      auto pr = [&lrf] (const repository::fragment_type& i)
+      {
+        return i.fragment == lrf;
+      };
+
+      for (const lazy_weak_ptr<repository>& r: cs)
+      {
+        if (!rep_masked (r))
+        {
+          const auto& frs (r.load ()->fragments);
+
+          if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
+            return lrf.load ();
+        }
+      }
+
+      if (prereq)
+      {
+        for (const lazy_weak_ptr<repository>& r: ps)
+        {
+          if (!rep_masked (r))
+          {
+            const auto& frs (r.load ()->fragments);
+
+            if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
+              return lrf.load ();
+          }
+        }
+      }
+    }
+
+    // Finally, load the complements and prerequisites and check them
+    // recursively.
+    //
+    for (const lazy_weak_ptr<repository>& cr: cs)
+    {
+      if (!rep_masked (cr))
+      {
+        for (const auto& fr: cr.load ()->fragments)
+        {
+          // Should we consider prerequisites of our complements as our
+          // prerequisites? I'd say not.
+          //
+          if (shared_ptr<repository_fragment> r =
+              find (fr.fragment.load (), ap, chain, false /* prereq */))
+            return r;
+        }
+      }
+    }
+
+    if (prereq)
+    {
+      for (const lazy_weak_ptr<repository>& pr: ps)
+      {
+        if (!rep_masked (pr))
+        {
+          for (const auto& fr: pr.load ()->fragments)
+          {
+            if (shared_ptr<repository_fragment> r =
+                find (fr.fragment.load (), ap, chain, false /* prereq */))
+              return r;
+          }
+        }
+      }
+    }
+
+    return nullptr;
+  }
+
+  shared_ptr<repository_fragment>
+  filter (const shared_ptr<repository_fragment>& r,
+          const shared_ptr<available_package>& ap,
+          bool prereq)
+  {
+    // Delegate to the recursive lookup, starting with an empty dependency
+    // chain.
+    //
+    repository_fragments dc;
+    return find (r, ap, dc, prereq);
+  }
+
+  vector<shared_ptr<available_package>>
+  filter (const shared_ptr<repository_fragment>& r,
+          result<available_package>&& apr,
+          bool prereq)
+  {
+    // Keep (in the result order) only the packages that are contained in
+    // the specified fragment, its complements, or prerequisites.
+    //
+    vector<shared_ptr<available_package>> ps;
+
+    for (shared_ptr<available_package> ap: pointer_result (apr))
+    {
+      if (filter (r, ap, prereq) != nullptr)
+        ps.push_back (move (ap));
+    }
+
+    return ps;
+  }
+
+  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+  filter_one (const shared_ptr<repository_fragment>& r,
+              result<available_package>&& apr,
+              bool prereq)
+  {
+    // Return the first package (in the result order) that is contained in
+    // the specified fragment, together with that fragment.
+    //
+    for (shared_ptr<available_package> ap: pointer_result (apr))
+    {
+      shared_ptr<repository_fragment> pr (filter (r, ap, prereq));
+
+      if (pr != nullptr)
+        return make_pair (move (ap), move (pr));
+    }
+
+    return {};
+  }
+
+  vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
+  filter (const vector<shared_ptr<repository_fragment>>& rps,
+          odb::result<available_package>&& apr,
+          bool prereq)
+  {
+    vector<pair<shared_ptr<available_package>,
+                shared_ptr<repository_fragment>>> ps;
+
+    for (shared_ptr<available_package> ap: pointer_result (apr))
+    {
+      // Pair the package with the first fragment in the list that contains
+      // it, if any.
+      //
+      for (const shared_ptr<repository_fragment>& r: rps)
+      {
+        shared_ptr<repository_fragment> rf (filter (r, ap, prereq));
+
+        if (rf != nullptr)
+        {
+          ps.emplace_back (move (ap), move (rf));
+          break;
+        }
+      }
+    }
+
+    return ps;
+  }
+
+  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+  filter_one (const vector<shared_ptr<repository_fragment>>& rps,
+              odb::result<available_package>&& apr,
+              bool prereq)
+  {
+    // Return the first package (in the result order) that is contained in
+    // one of the specified fragments, together with that fragment.
+    //
+    for (shared_ptr<available_package> ap: pointer_result (apr))
+    {
+      for (const shared_ptr<repository_fragment>& r: rps)
+      {
+        shared_ptr<repository_fragment> rf (filter (r, ap, prereq));
+
+        if (rf != nullptr)
+          return make_pair (move (ap), move (rf));
+      }
+    }
+
+    return {};
+  }
+
+  // Sort the available package fragments in the package version descending
+  // order and suppress duplicate packages and, optionally, older package
+  // revisions.
+  //
+  static void
+  sort_dedup (available_packages& pfs, bool suppress_older_revisions = false)
+  {
+    auto gt = [] (const auto& x, const auto& y)
+    {
+      return x.first->version > y.first->version;
+    };
+
+    sort (pfs.begin (), pfs.end (), gt);
+
+    // Note that the range is now sorted and so the equal elements are
+    // adjacent. If requested, also treat the versions which only differ in
+    // the revision as equal, keeping the first (latest revision) element.
+    //
+    auto eq = [suppress_older_revisions] (const auto& x, const auto& y)
+    {
+      return x.first->version.compare (y.first->version,
+                                       suppress_older_revisions) == 0;
+    };
+
+    pfs.erase (unique (pfs.begin (), pfs.end (), eq), pfs.end ());
+  }
+
+  available_packages
+  find_available (const linked_databases& dbs,
+                  const package_name& name,
+                  const optional<version_constraint>& c)
+  {
+    // Collect the matching available packages from each database, pairing
+    // each with the repository fragment it comes from (see the header for
+    // the full semantics).
+    //
+    available_packages r;
+
+    for (database& db: dbs)
+    {
+      for (shared_ptr<available_package> ap:
+             pointer_result (query_available (db, name, c)))
+      {
+        // All repository fragments the package comes from are equally good,
+        // so we pick the first unmasked one.
+        //
+        // Note: ap is not used after the move since we break out of the
+        // locations loop.
+        //
+        for (const auto& pl: ap->locations)
+        {
+          const lazy_shared_ptr<repository_fragment>& lrf (
+            pl.repository_fragment);
+
+          if (!rep_masked_fragment (lrf))
+          {
+            r.emplace_back (move (ap), lrf);
+            break;
+          }
+        }
+      }
+    }
+
+    // If there are multiple databases specified, then sort the result in the
+    // package version descending order and suppress duplicates.
+    //
+    if (dbs.size () > 1)
+      sort_dedup (r);
+
+    // Adding a stub from the imaginary system repository to the non-empty
+    // results isn't necessary but may end up with a duplicate. That's why we
+    // only add it if nothing else is found. Note that such an entry has a
+    // NULL repository fragment.
+    //
+    if (r.empty ())
+    {
+      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
+        r.emplace_back (move (ap), nullptr);
+    }
+
+    return r;
+  }
+
+  available_packages
+  find_available (const package_name& name,
+                  const optional<version_constraint>& c,
+                  const config_repo_fragments& rfs,
+                  bool prereq)
+  {
+    // Query each database and only keep the versions that belong to the
+    // repository fragments specified for that database (see the header for
+    // the full semantics).
+    //
+    available_packages r;
+
+    for (const auto& dfs: rfs)
+    {
+      database& db (dfs.first);
+      for (auto& af: filter (dfs.second,
+                             query_available (db, name, c),
+                             prereq))
+      {
+        r.emplace_back (
+          move (af.first),
+          lazy_shared_ptr<repository_fragment> (db, move (af.second)));
+      }
+    }
+
+    // If multiple configurations are involved, sort the result in the
+    // package version descending order and suppress duplicates.
+    //
+    if (rfs.size () > 1)
+      sort_dedup (r);
+
+    // Fall back to the imaginary system repository stub, if any (with a
+    // NULL repository fragment; see the header for background).
+    //
+    if (r.empty ())
+    {
+      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
+        r.emplace_back (move (ap), nullptr);
+    }
+
+    return r;
+  }
+
+  vector<shared_ptr<available_package>>
+  find_available (const package_name& name,
+                  const optional<version_constraint>& c,
+                  const lazy_shared_ptr<repository_fragment>& rf,
+                  bool prereq)
+  {
+    assert (!rep_masked_fragment (rf));
+
+    // Query the available versions and only keep those contained in the
+    // specified repository fragment (or its complements/prerequisites).
+    //
+    database& db (rf.database ());
+
+    vector<shared_ptr<available_package>> aps (
+      filter (rf.load (), query_available (db, name, c), prereq));
+
+    // Fall back to the imaginary system repository stub, if any (see the
+    // header for background).
+    //
+    if (aps.empty ())
+    {
+      if (shared_ptr<available_package> ap = find_imaginary_stub (name))
+        aps.push_back (move (ap));
+    }
+
+    return aps;
+  }
+
+  pair<shared_ptr<available_package>,
+       lazy_shared_ptr<repository_fragment>>
+  find_available_one (const package_name& name,
+                      const optional<version_constraint>& c,
+                      const lazy_shared_ptr<repository_fragment>& rf,
+                      bool prereq,
+                      bool revision)
+  {
+    assert (!rep_masked_fragment (rf));
+
+    // Filter the result based on the repository fragment to which each
+    // version belongs.
+    //
+    database& db (rf.database ());
+    auto r (
+      filter_one (rf.load (),
+                  query_available (db, name, c, true /* order */, revision),
+                  prereq));
+
+    // Fall back to the imaginary system repository stub, if any. In this
+    // case the returned repository fragment is NULL.
+    //
+    if (r.first == nullptr)
+      r.first = find_imaginary_stub (name);
+
+    return make_pair (r.first,
+                      (r.second != nullptr
+                       ? lazy_shared_ptr<repository_fragment> (db,
+                                                               move (r.second))
+                       : nullptr));
+  }
+
+  pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+  find_available_one (database& db,
+                      const package_name& name,
+                      const optional<version_constraint>& c,
+                      const vector<shared_ptr<repository_fragment>>& rfs,
+                      bool prereq,
+                      bool revision)
+  {
+    // Query the ordered versions and pick the first one that belongs to one
+    // of the specified repository fragments.
+    //
+    pair<shared_ptr<available_package>, shared_ptr<repository_fragment>> r (
+      filter_one (rfs,
+                  query_available (db, name, c, true /* order */, revision),
+                  prereq));
+
+    // Fall back to the imaginary system repository stub, if any.
+    //
+    if (r.first == nullptr)
+      r.first = find_imaginary_stub (name);
+
+    return r;
+  }
+
+  pair<shared_ptr<available_package>,
+       lazy_shared_ptr<repository_fragment>>
+  find_available_one (const linked_databases& dbs,
+                      const package_name& name,
+                      const optional<version_constraint>& c,
+                      bool prereq,
+                      bool revision)
+  {
+    // Search the databases in order, filtering against each database's root
+    // repository fragment (which has the empty name).
+    //
+    for (database& db: dbs)
+    {
+      pair<shared_ptr<available_package>, shared_ptr<repository_fragment>> r (
+        filter_one (db.load<repository_fragment> (""),
+                    query_available (db, name, c, true /* order */, revision),
+                    prereq));
+
+      if (r.first != nullptr)
+        return make_pair (
+          move (r.first),
+          lazy_shared_ptr<repository_fragment> (db, move (r.second)));
+    }
+
+    // Fall back to the imaginary system repository stub, if any.
+    //
+    return make_pair (find_imaginary_stub (name), nullptr);
+  }
+
+  shared_ptr<available_package>
+  find_available (const common_options& options,
+                  database& db,
+                  const shared_ptr<selected_package>& sp)
+  {
+    // Look for a real (non-stub) available package in the ultimate dependent
+    // configurations, falling back to a transient object if none is found.
+    //
+    available_package_id pid (sp->name, sp->version);
+
+    for (database& ddb: dependent_repo_configs (db))
+    {
+      if (shared_ptr<available_package> ap = ddb.find<available_package> (pid))
+      {
+        if (!ap->stub ())
+          return ap;
+      }
+    }
+
+    return make_available (options, db, sp);
+  }
+
+  pair<shared_ptr<available_package>,
+       lazy_shared_ptr<repository_fragment>>
+  find_available_fragment (const common_options& options,
+                           database& db,
+                           const shared_ptr<selected_package>& sp)
+  {
+    // Look for a real (non-stub) available package in the ultimate dependent
+    // configurations, also pairing it with the selected package's repository
+    // fragment if it can be found unmasked in the same configuration.
+    //
+    available_package_id pid (sp->name, sp->version);
+    const string& cn (sp->repository_fragment.canonical_name ());
+
+    for (database& ddb: dependent_repo_configs (db))
+    {
+      shared_ptr<available_package> ap (ddb.find<available_package> (pid));
+
+      if (ap != nullptr && !ap->stub ())
+      {
+        if (shared_ptr<repository_fragment> f =
+            ddb.find<repository_fragment> (cn))
+        {
+          if (!rep_masked_fragment (ddb, f))
+            return make_pair (ap,
+                              lazy_shared_ptr<repository_fragment> (ddb,
+                                                                    move (f)));
+        }
+      }
+    }
+
+    // Otherwise, fall back to the available package alone (potentially a
+    // transient one; see find_available()) with a NULL repository fragment.
+    //
+    return make_pair (find_available (options, db, sp), nullptr);
+  }
+
+  available_packages
+  find_available_all (const linked_databases& dbs,
+                      const package_name& name,
+                      bool suppress_older_revisions)
+  {
+    // Collect all the databases linked explicitly and implicitly to the
+    // specified databases, recursively.
+    //
+    // Note that this is a superset of the database cluster, since we descend
+    // into the database links regardless of their types (see
+    // cluster_configs() for details).
+    //
+    linked_databases all_dbs;
+    all_dbs.reserve (dbs.size ());
+
+    // Note: the lambda receives itself as the second argument in order to
+    // recurse. The linear find() makes the collection a set.
+    //
+    auto add = [&all_dbs] (database& db, const auto& add)
+    {
+      if (find (all_dbs.begin (), all_dbs.end (), db) != all_dbs.end ())
+        return;
+
+      all_dbs.push_back (db);
+
+      {
+        const linked_configs& cs (db.explicit_links ());
+        for (auto i (cs.begin_linked ()); i != cs.end (); ++i)
+          add (i->db, add);
+      }
+
+      {
+        const linked_databases& cs (db.implicit_links ());
+        for (auto i (cs.begin_linked ()); i != cs.end (); ++i)
+          add (*i, add);
+      }
+    };
+
+    for (database& db: dbs)
+      add (db, add);
+
+    // Collect all the available packages from all the collected databases.
+    //
+    available_packages r;
+
+    for (database& db: all_dbs)
+    {
+      for (shared_ptr<available_package> ap:
+             pointer_result (
+               query_available (db, name, nullopt /* version_constraint */)))
+      {
+        // All repository fragments the package comes from are equally good,
+        // so we pick the first unmasked one.
+        //
+        // Note: ap is not used after the move since we break out of the
+        // locations loop.
+        //
+        for (const auto& pl: ap->locations)
+        {
+          const lazy_shared_ptr<repository_fragment>& lrf (
+            pl.repository_fragment);
+
+          if (!rep_masked_fragment (lrf))
+          {
+            r.emplace_back (move (ap), lrf);
+            break;
+          }
+        }
+      }
+    }
+
+    // Sort the result in the package version descending order and suppress
+    // duplicates and, if requested, older package revisions.
+    //
+    sort_dedup (r, suppress_older_revisions);
+
+    return r;
+  }
+
+  // Create a transient available package object for the selected package and
+  // try to pair it with the package's "old" repository fragment (see the
+  // header for the full background).
+  //
+  pair<shared_ptr<available_package>,
+       lazy_shared_ptr<repository_fragment>>
+  make_available_fragment (const common_options& options,
+                           database& db,
+                           const shared_ptr<selected_package>& sp)
+  {
+    shared_ptr<available_package> ap (make_available (options, db, sp));
+
+    // For a system package don't bother looking for a repository fragment.
+    //
+    if (sp->system ())
+      return make_pair (move (ap), nullptr);
+
+    // First see if we can find its repository fragment.
+    //
+    // Note that this is package's "old" repository fragment and there is no
+    // guarantee that its dependencies are still resolvable from it. But this
+    // is our best chance (we could go nuclear and point all orphans to the
+    // root repository fragment but that feels a bit too drastic at the
+    // moment).
+    //
+    // Also note that the repository information for this selected package can
+    // potentially be in one of the ultimate dependent configurations as
+    // determined at the time of the run when the package was configured. This
+    // configurations set may differ from the current one, but let's try
+    // anyway.
+    //
+    lazy_shared_ptr<repository_fragment> rf;
+    const string& cn (sp->repository_fragment.canonical_name ());
+
+    for (database& ddb: dependent_repo_configs (db))
+    {
+      if (shared_ptr<repository_fragment> f =
+          ddb.find<repository_fragment> (cn))
+      {
+        // Skip the fragment if it is masked (the package stays an orphan in
+        // that case unless found in another configuration).
+        //
+        if (!rep_masked_fragment (ddb, f))
+        {
+          rf = lazy_shared_ptr<repository_fragment> (ddb, move (f));
+          break;
+        }
+      }
+    }
+
+    return make_pair (move (ap), move (rf));
+  }
+}
diff --git a/bpkg/package-query.hxx b/bpkg/package-query.hxx
new file mode 100644
index 0000000..de389c1
--- /dev/null
+++ b/bpkg/package-query.hxx
@@ -0,0 +1,281 @@
+// file : bpkg/package-query.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_PACKAGE_QUERY_HXX
+#define BPKG_PACKAGE_QUERY_HXX
+
+#include <odb/core.hxx>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/database.hxx>
+
+#include <bpkg/common-options.hxx>
+
+namespace bpkg
+{
+ // Query the available packages that optionally satisfy the specified
+ // version constraint and return them in the version descending order, by
+ // default. Note that a stub satisfies any constraint.
+ //
+ // By default if the revision is not explicitly specified for the version
+ // constraint, then compare ignoring the revision. The idea is that when the
+ // user runs 'bpkg build libfoo/1' and there is 1+1 available, it should
+ // just work. The user shouldn't have to spell the revision
+ // explicitly. Similarly, when we have 'depends: libfoo == 1', then it would
+ // be strange if 1+1 did not satisfy this constraint. The same for libfoo <=
+ // 1 -- 1+1 should satisfy.
+ //
+ // Note that by default we compare ignoring the iteration, as it can not be
+ // specified in the manifest/command line. This way the latest iteration
+ // will always be picked up.
+ //
+ // Pass true as the revision argument to query the exact available package
+ // version, also comparing the version revision and iteration.
+ //
+ odb::result<available_package>
+ query_available (database&,
+ const package_name&,
+ const optional<version_constraint>&,
+ bool order = true,
+ bool revision = false);
+
+ // Only return packages that are in the specified repository fragments, their
+ // complements or prerequisites (if prereq is true), recursively. While you
+ // could maybe come up with a (barely comprehensible) view/query to achieve
+ // this, doing it on the "client side" is definitely more straightforward.
+ //
+ vector<shared_ptr<available_package>>
+ filter (const shared_ptr<repository_fragment>&,
+ odb::result<available_package>&&,
+ bool prereq = true);
+
+ pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+ filter_one (const shared_ptr<repository_fragment>&,
+ odb::result<available_package>&&,
+ bool prereq = true);
+
+ shared_ptr<repository_fragment>
+ filter (const shared_ptr<repository_fragment>&,
+ const shared_ptr<available_package>&,
+ bool prereq = true);
+
+ vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
+ filter (const vector<shared_ptr<repository_fragment>>&,
+ odb::result<available_package>&&,
+ bool prereq = true);
+
+ pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+ filter_one (const vector<shared_ptr<repository_fragment>>&,
+ odb::result<available_package>&&,
+ bool prereq = true);
+
+ // Try to find packages that optionally satisfy the specified version
+ // constraint in multiple databases, suppressing duplicates. Return the list
+ // of packages and repository fragments in which each was found in the
+ // package version descending order or empty list if none were found. Note
+ // that a stub satisfies any constraint.
+ //
+ // Note that we return (loaded) lazy_shared_ptr in order to also convey
+ // the database to which it belongs.
+ //
+ available_packages
+ find_available (const linked_databases&,
+ const package_name&,
+ const optional<version_constraint>&);
+
+ // As above but only look for packages from the specified list of repository
+ // fragments, their prerequisite repositories, and their complements,
+ // recursively (note: recursivity applies to complements, not prerequisites).
+ //
+ using config_repo_fragments =
+ database_map<vector<shared_ptr<repository_fragment>>>;
+
+ available_packages
+ find_available (const package_name&,
+ const optional<version_constraint>&,
+ const config_repo_fragments&,
+ bool prereq = true);
+
+ // As above but only look for packages from a single repository fragment,
+ // its prerequisite repositories, and its complements, recursively (note:
+ // recursivity applies to complements, not prerequisites). Doesn't provide
+ // the repository fragments the packages come from.
+ //
+ // It is assumed that the repository fragment lazy pointer contains the
+ // database information.
+ //
+ vector<shared_ptr<available_package>>
+ find_available (const package_name&,
+ const optional<version_constraint>&,
+ const lazy_shared_ptr<repository_fragment>&,
+ bool prereq = true);
+
+ // As above but only look for a single package from the specified repository
+ // fragment, its prerequisite repositories, and their complements,
+ // recursively (note: recursivity applies to complements, not
+ // prerequisites). Return the package and the repository fragment in which
+ // it was found or NULL for both if not found.
+ //
+ // It is assumed that the repository fragment lazy pointer contains the
+ // database information.
+ //
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>
+ find_available_one (const package_name&,
+ const optional<version_constraint>&,
+ const lazy_shared_ptr<repository_fragment>&,
+ bool prereq = true,
+ bool revision = false);
+
+ // As above but look for a single package from a list of repository
+ // fragments.
+ //
+ pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
+ find_available_one (database&,
+ const package_name&,
+ const optional<version_constraint>&,
+ const vector<shared_ptr<repository_fragment>>&,
+ bool prereq = true,
+ bool revision = false);
+
+ // As above but look for a single package in multiple databases from their
+ // respective root repository fragments.
+ //
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>
+ find_available_one (const linked_databases&,
+ const package_name&,
+ const optional<version_constraint>&,
+ bool prereq = true,
+ bool revision = false);
+
+ // Try to find an available package corresponding to the specified selected
+ // package and, if not found, return a transient one. The search is
+ // performed in the ultimate dependent configurations of the selected
+ // package (see dependent_repo_configs() for details).
+ //
+ // NOTE: repo_configs needs to be filled prior to the function call.
+ //
+ shared_ptr<available_package>
+ find_available (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&);
+
+ // As above but also pair the available package with the repository fragment
+ // the available package comes from. Note that the package locations list is
+ // left empty and that the returned repository fragment could be NULL if the
+ // package is an orphan.
+ //
+ // NOTE: repo_configs needs to be filled prior to the function call.
+ //
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>
+ find_available_fragment (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&);
+
+ // Try to find packages in multiple databases, traversing the explicitly and
+ // implicitly linked databases recursively and suppressing duplicates and,
+ // optionally, older package revisions. Return the list of packages and
+ // repository fragments in which each was found in the package version
+ // descending order or empty list if none were found.
+ //
+ // Note that we return (loaded) lazy_shared_ptr in order to also convey
+ // the database to which it belongs.
+ //
+ available_packages
+ find_available_all (const linked_databases&,
+ const package_name&,
+ bool suppress_older_revisions = true);
+
+ // Create a transient (or fake, if you prefer) available_package object
+ // corresponding to the specified selected object. Note that the package
+ // locations list is left empty and that the returned repository fragment
+ // could be NULL if the package is an orphan.
+ //
+ // Note that the repository fragment is searched in the ultimate dependent
+ // configurations of the selected package (see dependent_repo_configs() for
+ // details).
+ //
+ // Also note that in our model we assume that make_available_fragment() is
+ // only called if there is no real available_package. This makes sure that
+ // if the package moves (e.g., from testing to stable), then we will be
+ // using stable to resolve its dependencies.
+ //
+ // NOTE: repo_configs needs to be filled prior to the function call.
+ //
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>
+ make_available_fragment (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&);
+
+ // Try to find an available stub package in the imaginary system repository.
+ // Such a repository contains stubs corresponding to the system packages
+ // specified by the user on the command line with version information
+ // (sys:libfoo/1.0, ?sys:libfoo/* but not ?sys:libfoo; the idea is that a
+ // real stub won't add any extra information to such a specification so we
+ // shouldn't insist on its presence). Semantically this imaginary repository
+ // complements all real repositories.
+ //
+ extern vector<shared_ptr<available_package>> imaginary_stubs;
+
+ shared_ptr<available_package>
+ find_imaginary_stub (const package_name&);
+
+ // Try to find an available package in the existing packages registry. Such
+ // a registry is configuration-specific and contains package versions
+ // specified by the user on the command line as archives or directories for
+ // specific configurations (see pkg-build for details on such packages).
+ //
+ // Note that semantically such a registry can be considered as an imaginary
+ // repository which complements all the real repositories fetched in the
+ // respective configuration. Also note that normally this repository is used
+ // first (by calling this function) when trying to resolve a dependency
+ // package, prior to searching in the real repositories.
+ //
+ extern vector<pair<reference_wrapper<database>,
+ shared_ptr<available_package>>> existing_packages;
+
+ pair<shared_ptr<available_package>, lazy_shared_ptr<repository_fragment>>
+ find_existing (database&,
+ const package_name&,
+ const optional<version_constraint>&);
+
+  inline pair<shared_ptr<available_package>,
+              lazy_shared_ptr<repository_fragment>>
+  find_existing (const package_name& n,
+                 const optional<version_constraint>& c,
+                 const lazy_shared_ptr<repository_fragment>& rf)
+  {
+    // Forward to the database-based overload, extracting the configuration
+    // database from the repository fragment lazy pointer.
+    //
+    database& db (rf.database ());
+    return find_existing (db, n, c);
+  }
+
+ // Configurations to use as the repository information sources.
+ //
+ // The list normally contains the current configurations and configurations
+ // of the specified on the command line build-to-hold packages (ultimate
+ // dependents).
+ //
+ // For ultimate dependents we use configurations in which they are being
+ // built as a source of the repository information. For dependency packages
+ // we use configurations of their ultimate dependents.
+ //
+ extern linked_databases repo_configs;
+
+ // Return the ultimate dependent configurations for packages in this
+ // configuration.
+ //
+ // Specifically, this is an intersection of all the dependent configurations
+ // for the specified configuration (see database::dependent_configs() for
+ // details) and configurations which contain repository information
+ // (repo_configs).
+ //
+ linked_databases
+ dependent_repo_configs (database&);
+}
+
+#endif // BPKG_PACKAGE_QUERY_HXX
diff --git a/bpkg/package-skeleton.cxx b/bpkg/package-skeleton.cxx
new file mode 100644
index 0000000..78635e7
--- /dev/null
+++ b/bpkg/package-skeleton.cxx
@@ -0,0 +1,2892 @@
+// file : bpkg/package-skeleton.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/package-skeleton.hxx>
+
+#include <sstream>
+
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/file.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/operation.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/lexer.hxx>
+#include <libbuild2/parser.hxx>
+
+#include <libbuild2/config/utility.hxx>
+
+#include <bpkg/bpkg.hxx>
+#include <bpkg/package.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/manifest-utility.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace bpkg
+{
+ // Check whether the specified configuration variable override has a project
+ // variable (i.e., its name starts with config.<project>). If the last
+ // argument is not NULL, then set it to the length of the variable portion.
+ //
+ // Note that some user-specified variables may have qualifications
+ // (global, scope, etc) but there is no reason to expect any project
+ // configuration variables to use such qualifications (since they can
+ // only apply to one project). So we ignore all qualified variables.
+ //
+ static inline bool
+ project_override (const string& v, const string& p, size_t* l = nullptr)
+ {
+ size_t n (p.size ());
+
+ if (v.compare (0, n, p) == 0)
+ {
+ if (v[n] == '.')
+ {
+ if (l != nullptr)
+ *l = v.find_first_of ("=+ \t", n + 1);
+
+ return true;
+ }
+ else if (strchr ("=+ \t", v[n]) != nullptr)
+ {
+ if (l != nullptr)
+ *l = n;
+
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ // Check whether the specified configuration variable name is a project
+ // variable (i.e., its name starts with config.<project>).
+ //
+ static inline bool
+ project_variable (const string& v, const string& p)
+ {
+ size_t n (p.size ());
+ return v.compare (0, n, p) == 0 && (v[n] == '.' || v[n] == '\0');
+ }
+
+ // Customized buildfile parser that is used to detect and diagnose
+ // references to undefined dependency configuration variables.
+ //
+ class buildfile_parser: public build2::parser
+ {
+ public:
+ buildfile_parser (build2::context& ctx,
+ const strings& dvps,
+ optional<size_t> dvps_pending = {})
+ : parser (ctx),
+ dependency_var_prefixes_ (dvps),
+ dependency_var_prefixes_pending_ (dvps_pending) {}
+
+ protected:
+ virtual build2::lookup
+ lookup_variable (build2::names&& qual,
+ string&& name,
+ const build2::location& loc) override
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::info;
+
+ // To avoid making copies of the name, pre-check if it is from one
+ // of the dependencies.
+ //
+ optional<string> dep;
+ if (!pre_parse_ && qual.empty ())
+ {
+ auto b (dependency_var_prefixes_.begin ());
+ auto e (dependency_var_prefixes_pending_
+ ? b + *dependency_var_prefixes_pending_
+ : dependency_var_prefixes_.end ());
+
+ if (find_if (b, e,
+ [&name] (const string& p)
+ {
+ return project_variable (name, p);
+ }) != e)
+ dep = name;
+ }
+
+ lookup l (parser::lookup_variable (move (qual), move (name), loc));
+
+ if (dep && !l.defined ())
+ fail (loc) << "undefined dependency configuration variable " << *dep <<
+ info << "was " << *dep << " set in earlier prefer or require clause?";
+
+ return l;
+ }
+
+ private:
+ const strings& dependency_var_prefixes_;
+ optional<size_t> dependency_var_prefixes_pending_;
+ };
+
+ static void
+ create_context (package_skeleton&, const strings&);
+
+ // Note: cannot be package_skeleton member function due to iterator return
+ // (build2 stuff is only forward-declared in the header).
+ //
+ static build2::scope_map::iterator
+ bootstrap (package_skeleton&, const strings&, bool old = false);
+
+ package_skeleton::
+ ~package_skeleton ()
+ {
+ }
+
+ package_skeleton::
+ package_skeleton (package_skeleton&& v) noexcept
+ : package (move (v.package)),
+ system (v.system),
+ available (move (v.available)),
+ load_config_flags (v.load_config_flags),
+ co_ (v.co_),
+ db_ (v.db_),
+ var_prefix_ (move (v.var_prefix_)),
+ config_vars_ (move (v.config_vars_)),
+ config_var_srcs_ (move (v.config_var_srcs_)),
+ disfigure_ (v.disfigure_),
+ config_srcs_ (v.config_srcs_),
+ src_root_ (move (v.src_root_)),
+ out_root_ (move (v.out_root_)),
+ src_root_specified_ (v.src_root_specified_),
+ old_src_root_ (move (v.old_src_root_)),
+ old_out_root_ (move (v.old_out_root_)),
+ created_ (v.created_),
+ verified_ (v.verified_),
+ loaded_old_config_ (v.loaded_old_config_),
+ develop_ (v.develop_),
+ ctx_ (move (v.ctx_)),
+ rs_ (v.rs_),
+ cmd_vars_ (move (v.cmd_vars_)),
+ cmd_vars_cache_ (v.cmd_vars_cache_),
+ dependent_vars_ (move (v.dependent_vars_)),
+ dependent_orgs_ (move (v.dependent_orgs_)),
+ reflect_ (move (v.reflect_)),
+ dependency_reflect_ (move (v.dependency_reflect_)),
+ dependency_reflect_index_ (v.dependency_reflect_index_),
+ dependency_reflect_pending_ (v.dependency_reflect_pending_),
+ dependency_var_prefixes_ (move (v.dependency_var_prefixes_)),
+ dependency_var_prefixes_pending_ (v.dependency_var_prefixes_pending_),
+ prefer_accept_ (v.prefer_accept_)
+ {
+ v.db_ = nullptr;
+ }
+
+ package_skeleton& package_skeleton::
+ operator= (package_skeleton&& v) noexcept
+ {
+ if (this != &v)
+ {
+ package = move (v.package);
+ system = v.system;
+ available = move (v.available);
+ load_config_flags = v.load_config_flags;
+ co_ = v.co_;
+ db_ = v.db_;
+ var_prefix_ = move (v.var_prefix_);
+ config_vars_ = move (v.config_vars_);
+ config_var_srcs_ = move (v.config_var_srcs_);
+ disfigure_ = v.disfigure_;
+ config_srcs_ = v.config_srcs_;
+ src_root_ = move (v.src_root_);
+ out_root_ = move (v.out_root_);
+ src_root_specified_ = v.src_root_specified_;
+ old_src_root_ = move (v.old_src_root_);
+ old_out_root_ = move (v.old_out_root_);
+ created_ = v.created_;
+ verified_ = v.verified_;
+ loaded_old_config_ = v.loaded_old_config_;
+ develop_ = v.develop_;
+ ctx_ = move (v.ctx_);
+ rs_ = v.rs_;
+ cmd_vars_ = move (v.cmd_vars_);
+ cmd_vars_cache_ = v.cmd_vars_cache_;
+ dependent_vars_ = move (v.dependent_vars_);
+ dependent_orgs_ = move (v.dependent_orgs_);
+ reflect_ = move (v.reflect_);
+ dependency_reflect_ = move (v.dependency_reflect_);
+ dependency_reflect_index_ = v.dependency_reflect_index_;
+ dependency_reflect_pending_ = v.dependency_reflect_pending_;
+ dependency_var_prefixes_ = move (v.dependency_var_prefixes_);
+ dependency_var_prefixes_pending_ = v.dependency_var_prefixes_pending_;
+ prefer_accept_ = v.prefer_accept_;
+
+ v.db_ = nullptr;
+ }
+
+ return *this;
+ }
+
+ package_skeleton::
+ package_skeleton (const package_skeleton& v)
+ : package (v.package),
+ system (v.system),
+ available (v.available),
+ load_config_flags (v.load_config_flags),
+ co_ (v.co_),
+ db_ (v.db_),
+ var_prefix_ (v.var_prefix_),
+ config_vars_ (v.config_vars_),
+ config_var_srcs_ (v.config_var_srcs_),
+ disfigure_ (v.disfigure_),
+ config_srcs_ (v.config_srcs_),
+ src_root_ (v.src_root_),
+ out_root_ (v.out_root_),
+ src_root_specified_ (v.src_root_specified_),
+ old_src_root_ (v.old_src_root_),
+ old_out_root_ (v.old_out_root_),
+ created_ (v.created_),
+ verified_ (v.verified_),
+ loaded_old_config_ (v.loaded_old_config_),
+ develop_ (v.develop_),
+ cmd_vars_ (v.cmd_vars_),
+ cmd_vars_cache_ (v.cmd_vars_cache_),
+ dependent_vars_ (v.dependent_vars_),
+ dependent_orgs_ (v.dependent_orgs_),
+ reflect_ (v.reflect_),
+ dependency_reflect_ (v.dependency_reflect_),
+ dependency_reflect_index_ (v.dependency_reflect_index_),
+ dependency_reflect_pending_ (v.dependency_reflect_pending_),
+ dependency_var_prefixes_ (v.dependency_var_prefixes_),
+ dependency_var_prefixes_pending_ (v.dependency_var_prefixes_pending_),
+ prefer_accept_ (v.prefer_accept_)
+ {
+ // The idea here is to create an "unloaded" copy but with enough state
+ // that it can be loaded if necessary.
+ //
+ // Note that there is a bit of a hole in this logic with regards to the
+ // prefer_accept_ semantics but it looks like we cannot plausibly trigger
+ // it (which is fortified with an assert in evaluate_reflect(); note that
+ // doing it here would be overly strict since this may have a left-over
+ // prefer_accept_ position).
+ }
+
+ void package_skeleton::
+ reset ()
+ {
+ assert (db_ != nullptr); // Cannot be called after collect_config().
+
+ rs_ = nullptr;
+ ctx_ = nullptr; // Free.
+
+ cmd_vars_.clear ();
+ cmd_vars_cache_ = false;
+
+ dependent_vars_.clear ();
+ dependent_orgs_.clear ();
+
+ reflect_.clear ();
+
+ dependency_reflect_.clear ();
+ dependency_reflect_index_ = 0;
+ dependency_reflect_pending_ = 0;
+
+ dependency_var_prefixes_.clear ();
+ dependency_var_prefixes_pending_ = 0;
+
+ prefer_accept_ = nullopt;
+ }
+
+ package_skeleton::
+ package_skeleton (const common_options& co,
+ package_key pk,
+ bool sys,
+ shared_ptr<const available_package> ap,
+ strings cvs,
+ bool df,
+ const vector<config_variable>* css,
+ optional<dir_path> src_root,
+ optional<dir_path> out_root,
+ optional<dir_path> old_src_root,
+ optional<dir_path> old_out_root,
+ uint16_t lcf)
+ : package (move (pk)),
+ system (sys),
+ available (move (ap)),
+ load_config_flags (lcf),
+ co_ (&co),
+ db_ (&package.db.get ()),
+ var_prefix_ ("config." + package.name.variable ()),
+ config_vars_ (move (cvs)),
+ disfigure_ (df),
+ config_srcs_ (df ? nullptr : css)
+ {
+ if (available != nullptr)
+ assert (available->bootstrap_build); // Should have skeleton info.
+ else
+ assert (system);
+
+ if (!config_vars_.empty ())
+ config_var_srcs_ = vector<config_source> (config_vars_.size (),
+ config_source::user);
+
+ // We are only interested in old user configuration variables.
+ //
+ if (config_srcs_ != nullptr)
+ {
+ if (find_if (config_srcs_->begin (), config_srcs_->end (),
+ [this] (const config_variable& v)
+ {
+ return ((load_config_flags & load_config_user) != 0 &&
+ v.source == config_source::user) ||
+ ((load_config_flags & load_config_dependent) != 0 &&
+ v.source == config_source::dependent);
+ }) == config_srcs_->end ())
+ config_srcs_ = nullptr;
+ }
+
+ // We don't need to load old user configuration if there isn't any and
+ // there is no new project configuration specified by the user.
+ //
+ // Note that at first it may seem like we shouldn't do this for any system
+ // packages but if we want to verify the user configuration, why not do so
+ // for system if we can (i.e., have skeleton info)?
+ //
+ if (available == nullptr)
+ loaded_old_config_ = true;
+ else
+ loaded_old_config_ =
+ (config_srcs_ == nullptr) &&
+ find_if (config_vars_.begin (), config_vars_.end (),
+ [this] (const string& v)
+ {
+ // For now tighten it even further so that we can continue
+ // using repositories without package skeleton information
+ // (bootstrap.build, root.build). See
+ // load_old_config_impl() for details.
+ //
+#if 0
+ return project_override (v, var_prefix_);
+#else
+ size_t vn;
+ size_t pn (var_prefix_.size ());
+ return (project_override (v, var_prefix_, &vn) &&
+ v.compare (pn, vn - pn, ".develop") == 0);
+#endif
+ }) == config_vars_.end ();
+
+ if (src_root)
+ {
+ src_root_ = move (*src_root);
+
+ assert (!src_root_.empty ()); // Must exist.
+
+ src_root_specified_ = true;
+
+ if (out_root)
+ out_root_ = move (*out_root);
+ }
+ else
+ assert (!out_root);
+
+ if (old_src_root)
+ {
+ old_src_root_ = move (*old_src_root);
+
+ assert (!old_src_root_.empty ()); // Must exist.
+
+ if (old_out_root)
+ old_out_root_ = move (*old_out_root);
+ }
+ else
+ assert (!old_out_root);
+ }
+
+ // Serialize a variable assignment for a command line override.
+ //
+ static string
+ serialize_cmdline (const string& var, const build2::value& val,
+ build2::names& storage)
+ {
+ using namespace build2;
+
+ string r (var + '=');
+
+ if (val.null)
+ r += "[null]";
+ else
+ {
+ storage.clear ();
+ names_view nv (reverse (val, storage, true /* reduce */));
+
+ if (!nv.empty ())
+ {
+ // Note: we need to use command-line (effective) quoting.
+ //
+ ostringstream os;
+ to_stream (os, nv, quote_mode::effective, '@');
+ r += os.str ();
+ }
+ }
+
+ return r;
+ }
+
+ // Reverse value to names reducing empty simple value to empty list of
+ // names.
+ //
+ static optional<build2::names>
+ reverse_value (const build2::value& val)
+ {
+ using namespace build2;
+
+ if (val.null)
+ return nullopt;
+
+ names storage;
+ names_view nv (reverse (val, storage, true /* reduce */));
+
+ return (nv.data () == storage.data ()
+ ? move (storage)
+ : names (nv.begin (), nv.end ()));
+ }
+
+ // Return the dependent (origin==buildfile) configuration variables as
+ // command line overrides. If the second argument is not NULL, then populate
+ // it with the corresponding originating dependents.
+ //
+ static strings
+ dependent_cmd_vars (const package_configuration& cfg,
+ vector<package_key>* orgs = nullptr)
+ {
+ using build2::config::variable_origin;
+
+ strings r;
+
+ for (const config_variable_value& v: cfg)
+ {
+ if (v.origin == variable_origin::buildfile)
+ {
+ r.push_back (v.serialize_cmdline ());
+
+ if (orgs != nullptr)
+ orgs->push_back (*v.dependent);
+ }
+ }
+
+ return r;
+ }
+
+ void package_skeleton::
+ reload_defaults (package_configuration& cfg)
+ {
+ // Should only be called before dependent_config()/evaluate_*().
+ //
+ assert (dependent_vars_.empty () &&
+ reflect_.empty () &&
+ dependency_reflect_.empty () &&
+ available != nullptr &&
+ ctx_ == nullptr);
+
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+
+ try
+ {
+ using namespace build2;
+ using build2::info;
+
+ // This is what needs to happen to the variables of different origins in
+ // the passed configuration:
+ //
+ // default -- reloaded
+ // buildfile/dependent -- made command line override
+ // override/user -- should match what's in config_vars_
+ // undefined -- reloaded
+ //
+ // Note also that on the first call we will have no configuration. And
+ // so to keep things simple, we merge variables of the buildfile origin
+ // into cmd_vars and then rebuild things from scratch. Note, however,
+ // that below we need to sort out these merged overrides into user and
+ // dependent, so we keep the old configuration for reference.
+ //
+ // Note also that dependent values do not clash with user overrides by
+ // construction (in evaluate_{prefer_accept,require}()): we do not add
+ // as dependent variables that have the override origin.
+ //
+ scope* rs;
+ {
+ auto df = build2::make_diag_frame (
+ [this] (const build2::diag_record& dr)
+ {
+ dr << info << "while loading build system skeleton of package "
+ << package.name;
+ });
+
+ rs = bootstrap (
+ *this, merge_cmd_vars (dependent_cmd_vars (cfg)))->second.front ();
+
+ // Load project's root.build.
+ //
+ load_root (*rs);
+ }
+
+ package_configuration old (move (cfg));
+ cfg.package = move (old.package);
+
+ // Note that a configuration variable may not have a default value so we
+ // cannot just iterate over all the config.<name>** values set on the
+ // root scope. Our options seem to be either iterating over the variable
+ // pool or forcing the config module with config.config.module=true and
+ // then using its saved variables map. Since the amount of stuff we load
+ // is quite limited, there shouldn't be too many variables in the pool.
+ // So let's go with the simpler approach for now.
+ //
+ // Though the saved variables map approach would have been more accurate
+ // since that's the variables that were introduced with the config
+ // directive. Potentially the user could just have a buildfile
+ // config.<name>** variable but it feels like that should be harmless
+ // (we will return it but nobody will presumably use that information).
+ // Also, if/when we start tracking the configuration variable
+ // dependencies (i.e., which default value depend on which config
+ // variable), then the saved variables map seem like the natural place
+ // to keep this information.
+ //
+ // Note: go straight for the public variable pool.
+ //
+ for (const variable& var: rs->ctx.var_pool)
+ {
+ if (!project_variable (var.name, var_prefix_))
+ continue;
+
+ using config::variable_origin;
+
+ pair<variable_origin, lookup> ol (config::origin (*rs, var));
+
+ switch (ol.first)
+ {
+ case variable_origin::default_:
+ case variable_origin::override_:
+ case variable_origin::undefined:
+ {
+ config_variable_value v {
+ var.name, ol.first, {}, {}, {}, false, false};
+
+ // Override could mean user override from config_vars_ or the
+ // dependent override that we have merged above.
+ //
+ if (v.origin == variable_origin::override_)
+ {
+ if (config_variable_value* ov = old.find (v.name))
+ {
+ if (ov->origin == variable_origin::buildfile)
+ {
+ v.origin = variable_origin::buildfile;
+ v.dependent = move (ov->dependent);
+ v.confirmed = ov->confirmed;
+ v.has_alternative = ov->has_alternative;
+ }
+ else
+ assert (ov->origin == variable_origin::override_);
+ }
+ }
+
+ // Save value.
+ //
+ if (v.origin != variable_origin::undefined)
+ v.value = reverse_value (*ol.second);
+
+ // Save type.
+ //
+ if (var.type != nullptr)
+ v.type = var.type->name;
+
+ cfg.push_back (move (v));
+ break;
+ }
+ case variable_origin::buildfile:
+ {
+ // Feel like this shouldn't happen since we have disfigured them.
+ //
+ assert (false);
+ break;
+ }
+ }
+ }
+
+ verified_ = true; // Managed to load without errors.
+ ctx_ = nullptr;
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ void package_skeleton::
+ load_overrides (package_configuration& cfg)
+ {
+ // Should only be called before dependent_config()/evaluate_*() and only
+ // on system package without skeleton info.
+ //
+ assert (dependent_vars_.empty () &&
+ reflect_.empty () &&
+ dependency_reflect_.empty () &&
+ available == nullptr &&
+ system);
+
+ if (find_if (config_vars_.begin (), config_vars_.end (),
+ [this] (const string& v)
+ {
+ return project_override (v, var_prefix_);
+ }) == config_vars_.end ())
+ return;
+
+ try
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::endf;
+
+ using config::variable_origin;
+
+ // Create the build context.
+ //
+ create_context (*this, strings {});
+ context& ctx (*ctx_);
+
+ // Note: go straight for the public variable pool.
+ //
+ scope& gs (ctx.global_scope.rw ());
+ auto& vp (gs.var_pool (true /* public */));
+
+ for (const string& v: config_vars_)
+ {
+ size_t vn;
+ if (!project_override (v, var_prefix_, &vn))
+ continue;
+
+ const variable& var (vp.insert (string (v, 0, vn)));
+
+ // Parse the value part (note that all evaluate_require() cares about
+ // is whether the value is true or not, but we do need accurate values
+ // for diagnostics).
+ //
+ size_t p (v.find ('=', vn));
+ assert (p != string::npos);
+ if (v[p + 1] == '+')
+ ++p;
+
+ optional<names> val;
+ {
+ // Similar to context() ctor.
+ //
+ istringstream is (string (v, p + 1));
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ path_name in ("<cmdline>");
+ lexer lex (is, in, 1 /* line */, "\'\"\\$("); // Effective.
+
+ parser par (ctx);
+ pair<value, token> r (
+ par.parse_variable_value (lex, gs, &build2::work, var));
+
+ if (r.second.type != token_type::eos)
+ fail << "invalid command line variable override '" << v << "'";
+
+ val = reverse_value (r.first);
+ }
+
+ // @@ Should we do anything with append/prepend?
+ //
+ if (config_variable_value* v = cfg.find (var.name))
+ v->value = move (val);
+ else
+ cfg.push_back (config_variable_value {var.name,
+ variable_origin::override_,
+ {},
+ move (val),
+ {},
+ false,
+ false});
+ }
+
+ ctx_ = nullptr; // Free.
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ pair<bool, string> package_skeleton::
+ verify_sensible (const package_configuration& cfg)
+ {
+ // Should only be called before dependent_config()/evaluate_*().
+ //
+ assert (dependent_vars_.empty () &&
+ reflect_.empty () &&
+ dependency_reflect_.empty () &&
+ available != nullptr &&
+ ctx_ == nullptr);
+
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+
+ try
+ {
+ using namespace build2;
+ using build2::info;
+
+ // For now we treat any failure to load root.build as bad configuration,
+ // which is not very precise. One idea to make this more precise would
+ // be to invent some kind of tagging for "bad configuration" diagnostics
+ // (e.g., either via an attribute or via special config.assert directive
+ // or some such).
+ //
+ // For now we rely on load_defaults() and load_old_config_impl() to
+ // "flush" out any unrelated errors (e.g., one of the modules
+ // configuration is bad, etc). However, if that did not happen
+ // naturally, then we must do it ourselves.
+ //
+ if (!verified_)
+ {
+ auto df = build2::make_diag_frame (
+ [this] (const build2::diag_record& dr)
+ {
+ dr << info << "while loading build system skeleton of package "
+ << package.name;
+ });
+
+ scope& rs (
+ *bootstrap (*this, merge_cmd_vars (strings {}))->second.front ());
+
+ load_root (rs);
+
+ verified_ = true;
+ ctx_ = nullptr;
+ }
+
+ scope& rs (
+ *bootstrap (
+ *this, merge_cmd_vars (dependent_cmd_vars (cfg)))->second.front ());
+
+ // Load project's root.build while redirecting the diagnostics stream.
+ //
+ // Note: no diag_frame unlike all the other places.
+ //
+ ostringstream ds;
+ auto dg (make_guard ([ods = diag_stream] () {diag_stream = ods;}));
+ diag_stream = &ds;
+
+ pair<bool, string> r;
+ try
+ {
+ load_root (rs);
+ r.first = true;
+ }
+ catch (const build2::failed&)
+ {
+ r.first = false;
+ r.second = trim (ds.str ());
+ }
+
+ ctx_ = nullptr;
+ return r;
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ void package_skeleton::
+ dependent_config (const package_configuration& cfg)
+ {
+ assert (dependent_vars_.empty ()); // Must be called at most once.
+
+ dependent_vars_ = dependent_cmd_vars (cfg, &dependent_orgs_);
+ }
+
+ // Print the location of a depends value in the specified manifest file.
+ //
+ // Note that currently we only use this function for the being reconfigured
+ // and external packages (i.e. when the existing source directory is
+ // specified). We could also do something similar for the remaining cases by
+ // pointing to the manifest we have serialized. In this case we would also
+ // need to make sure the temp directory is not cleaned in case of an error.
+ // Maybe one day.
+ //
+ static void
+ depends_location (const build2::diag_record& dr,
+ const path& mf,
+ size_t depends_index)
+ {
+ // Note that we can't do much on the manifest parsing failure and just
+ // skip printing the location in this case.
+ //
+ try
+ {
+ ifdstream is (mf);
+ manifest_parser p (is, mf.string ());
+
+ manifest_name_value nv (p.next ());
+ if (nv.name.empty () && nv.value == "1")
+ {
+ size_t i (0);
+ for (nv = p.next (); !nv.empty (); nv = p.next ())
+ {
+ if (nv.name == "depends" && i++ == depends_index)
+ {
+ dr << build2::info (build2::location (mf,
+ nv.value_line,
+ nv.value_column))
+ << "in depends manifest value defined here";
+ break;
+ }
+ }
+ }
+ }
+ catch (const manifest_parsing&) {}
+ catch (const io_error&) {}
+ }
+
+ bool package_skeleton::
+ evaluate_enable (const string& cond, pair<size_t, size_t> indexes)
+ {
+ size_t depends_index (indexes.first);
+
+ try
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::info;
+ using build2::endf;
+
+ // Drop the state from the previous evaluation of prefer/accept.
+ //
+ if (prefer_accept_)
+ {
+ ctx_ = nullptr;
+ prefer_accept_ = nullopt;
+ }
+
+ scope& rs (load ());
+
+ // Evaluate the enable condition.
+ //
+ istringstream is ('(' + cond + ')');
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // Location is tricky: theoretically we can point to the exact position
+ // of an error but that would require quite hairy and expensive manifest
+ // re-parsing. The really bad part is that all this effort will be
+ // wasted in the common "no errors" cases. So instead we do this
+ // re-parsing lazily from the diag frame.
+ //
+ path_name in ("<depends-enable-clause>");
+ uint64_t il (1);
+
+ auto df = build2::make_diag_frame (
+ [this, &cond, depends_index] (const build2::diag_record& dr)
+ {
+ dr << info << "enable condition: (" << cond << ")";
+
+ // If an existing source directory has been specified, then we have
+ // the manifest and so print the location of the depends value in
+ // questions.
+ //
+ if (src_root_specified_)
+ depends_location (dr, src_root_ / manifest_file, depends_index);
+ else
+ dr << info << "in depends manifest value of package "
+ << package.name;
+ });
+
+ lexer l (is, in, il /* start line */);
+ buildfile_parser p (rs.ctx, dependency_var_prefixes_);
+ value v (p.parse_eval (l, rs, rs, parser::pattern_mode::expand));
+
+ try
+ {
+ // Should evaluate to 'true' or 'false'.
+ //
+ return build2::convert<bool> (move (v));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (build2::location (in, il)) << e << endf;
+ }
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ void package_skeleton::
+ evaluate_reflect (const string& refl, pair<size_t, size_t> indexes)
+ {
+ size_t depends_index (indexes.first);
+
+ // The reflect configuration variables are essentially overrides that will
+ // be passed on the command line when we configure the package. They could
+ // clash with configuration variables specified by the user (config_vars_)
+ // and it feels like user values should take precedence. Though one could
+ // also argue we should diagnose this case and fail not to cause more
+ // confusion.
+ //
+ // They could also clash with dependent configuration. Probably should be
+ // handled in the same way (it's just another type of "user"). Yes, since
+ // dependent_vars_ are entered as cmd line overrides, this is how they are
+ // treated.
+ //
+ // It seems like the most straightforward way to achieve the desired
+ // semantics with the mechanisms that we have (in other words, without
+ // inventing another "level" of overrides) is to evaluate the reflect
+ // fragment after loading root.build. This way it will (1) be able to use
+ // variables set by root.build in conditions, (2) override default values
+ // of configuration variables (and those loaded from config.build), and
+ // (3) be overridden by configuration variables specified by the user.
+ // Naturally, this approach is probably not without a few corner cases.
+ //
+ // We may also have configuration values from the previous reflect clause
+ // which we want to "factor in" before evaluating the next clause (enable,
+ // reflect etc.; so that they can use the previously reflected values or
+ // values that are derived from them in root.build). It seems like we have
+ // two options here: either enter them as true overrides similar to
+ // config_vars_ or just evaluate them similar to loading config.build
+ // (which, BTW, we might have, in case of a being reconfigured or external
+ // package). The big problem with the former approach is that it will then
+ // prevent any further reflect clauses from modifying the same values.
+ //
+ // So overall it feels like we have iterative/compartmentalized
+ // configuration process. A feedback loop, in a sense. And it's the
+ // responsibility of the package author (who is in control of both
+ // root.build and manifest) to arrange for suitable compartmentalization.
+ //
+ try
+ {
+ // Note: similar in many respects to evaluate_enable().
+ //
+ using namespace build2;
+ using config::variable_origin;
+ using build2::diag_record;
+ using build2::fail;
+ using build2::info;
+ using build2::endf;
+
+ // Drop the state from the previous evaluation of prefer/accept if it's
+ // from the wrong position.
+ //
+ optional<size_t> dependency_var_prefixes_pending;
+ if (prefer_accept_)
+ {
+ if (*prefer_accept_ != indexes)
+ {
+ ctx_ = nullptr;
+ prefer_accept_ = nullopt;
+ }
+ else
+ {
+ // This could theoretically happen if we make a copy of the skeleton
+ // after evaluate_prefer_accept() and then attempt to continue with
+ // the call on the copy to evaluate_reflect() passing the same
+ // position. But it doesn't appear our usage should trigger this.
+ //
+ assert (ctx_ != nullptr);
+
+ dependency_var_prefixes_pending = dependency_var_prefixes_pending_;
+ }
+ }
+
+ scope& rs (load ());
+
+ // Collect all the set config.<name>.* variables on the first pass and
+ // filter out unchanged on the second.
+ //
+ // Note: a lot of this code is inspired by the config module.
+ //
+ struct rvar
+ {
+ const variable* var;
+ const value* val;
+ size_t ver;
+ };
+
+ class rvars: public vector<rvar>
+ {
+ public:
+ pair<iterator, bool>
+ insert (const rvar& v)
+ {
+ auto i (find_if (begin (), end (),
+ [&v] (const rvar& o) {return v.var == o.var;}));
+ if (i != end ())
+ return make_pair (i, false);
+
+ push_back (v);
+ return make_pair (--end (), true);
+ }
+ };
+
+ rvars vars;
+
+ auto process = [this, &rs, &vars] (bool collect)
+ {
+ // @@ TODO: would be nice to verify no bogus variables are set (can
+ // probably only be done via the saved variables map).
+
+ for (auto p (rs.vars.lookup_namespace (var_prefix_));
+ p.first != p.second;
+ ++p.first)
+ {
+ const variable& var (p.first->first);
+
+ // This can be one of the overrides (__override, __prefix, etc),
+ // which we skip.
+ //
+ if (var.override ())
+ continue;
+
+ // What happens to version if overridden? A: appears to be still
+ // incremented!
+ //
+ const variable_map::value_data& val (p.first->second);
+
+ if (collect)
+ vars.insert (rvar {&var, nullptr, val.version});
+ else
+ {
+ auto p (vars.insert (rvar {&var, &val, 0}));
+
+ if (!p.second)
+ {
+ auto i (p.first);
+
+ if (i->ver == val.version)
+ vars.erase (i); // Unchanged.
+ else
+ i->val = &val;
+ }
+ }
+ }
+ };
+
+ // Evaluate the reflect clause.
+ //
+ istringstream is (refl);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ path_name in ("<depends-reflect-clause>");
+ uint64_t il (1);
+
+ // Note: keep it active until the end (see the override detection).
+ //
+ auto df = build2::make_diag_frame (
+ [this, &refl, depends_index] (const build2::diag_record& dr)
+ {
+ // Probably safe to assume a one-line fragment contains a variable
+ // assignment.
+ //
+ if (refl.find ('\n') == string::npos)
+ dr << info << "reflect variable: " << trim (string (refl));
+ else
+ dr << info << "reflect clause:\n"
+ << trim_right (string (refl));
+
+ // If an existing source directory has been specified, then we have
+ // the manifest and so print the location of the depends value in
+ // questions.
+ //
+ if (src_root_specified_)
+ depends_location (dr, src_root_ / manifest_file, depends_index);
+ else
+ dr << info << "in depends manifest value of package "
+ << package.name;
+ });
+
+ lexer l (is, in, il /* start line */);
+ buildfile_parser p (rs.ctx,
+ dependency_var_prefixes_,
+ dependency_var_prefixes_pending);
+
+ process (true);
+ p.parse_buildfile (l, &rs, rs);
+ process (false);
+
+ // Add to the vars set the reflect variables collected previously.
+ //
+ for (const reflect_variable_value& v: reflect_)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable* var (rs.ctx.var_pool.find (v.name));
+ assert (var != nullptr); // Must be there (set by load()).
+
+ auto p (vars.insert (rvar {var, nullptr, 0}));
+
+ if (p.second)
+ p.first->val = rs.vars[*var].value; // Must be there (set by load()).
+ }
+
+ // Re-populate everything from the var set but try to re-use buffers as
+ // much as possible (normally we would just be appending more variables
+ // at the end).
+ //
+ reflect_.resize (vars.size ());
+
+ // Collect the config.<name>.* variables that were changed by this
+ // and previous reflect clauses.
+ //
+ for (size_t i (0); i != vars.size (); ++i)
+ {
+ const variable& var (*vars[i].var);
+ const value& val (*vars[i].val);
+
+ pair<variable_origin, lookup> ol (
+ config::origin (rs,
+ var,
+ pair<lookup, size_t> {
+ lookup {val, var, rs.vars}, 1 /* depth */}));
+
+ reflect_variable_value& v (reflect_[i]);
+ v.name = var.name;
+ v.origin = ol.first;
+
+ if (ol.first == variable_origin::override_)
+ {
+ // Detect an overridden reflect value, but allowing it if the values
+ // match (think both user/dependent and reflect trying to enable the
+ // same feature).
+ //
+ // What would be the plausible scenarios for an override?
+ //
+ // 1. Append override that adds some backend or some such to the
+ // reflect value.
+ //
+ // 2. A reflect may enable a feature based on the dependency
+ //    alternative selected (e.g., I see we are using Qt6 so we might
+ //    as well enable feature X). The user may want to disable it
+ //    with an override.
+ //
+ // Note also that a sufficiently smart reflect clause can detect if
+ // a variable is overridden (with $config.origin()) and avoid the
+ // clash. Perhaps that should be the recommendation for reflect
+ // variables that can also plausibly be set by the user (it feels
+ // like configuration variables have the intf/impl split similar
+ // to libraries)?
+ //
+ if (val != *ol.second)
+ {
+ // See if this is a dependent or user override.
+ //
+ const package_key* d (nullptr);
+ {
+ for (size_t i (0); i != dependent_vars_.size (); ++i)
+ {
+ const string& v (dependent_vars_[i]);
+ size_t n (var.name.size ());
+ if (v.compare (0, n, var.name) == 0 && v[n] == '=')
+ {
+ d = &dependent_orgs_[i];
+ break;
+ }
+ }
+ }
+
+ diag_record dr (fail);
+
+ dr << "reflect variable " << var << " overriden by ";
+
+ if (d != nullptr)
+ dr << "dependent " << *d;
+ else
+ dr << "user configuration";
+
+ names storage;
+ dr << info << "reflect value: "
+ << serialize_cmdline (var.name, val, storage);
+
+ dr << info << (d != nullptr ? "dependent" : "user") << " value: "
+ << serialize_cmdline (var.name, *ol.second, storage);
+ }
+
+ // Skipped in load(), collect_config(), but used in print_config().
+ }
+ else
+ {
+ assert (ol.first == variable_origin::buildfile);
+
+ // Note that we keep it always untyped letting the config directives
+ // in root.build to take care of typing.
+ //
+ v.value = reverse_value (val);
+ }
+ }
+
+ // Drop the build system state since it needs reloading (some computed
+ // values in root.build may depend on the new configuration values).
+ //
+ ctx_ = nullptr;
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
// Parse and evaluate the prefer clause (a buildfile fragment) and the
// accept condition (a buildfile eval expression) of the depends manifest
// value with index indexes.first against the dependency configurations in
// cfgs. If the accept condition evaluates to true, propagate the values
// set by this dependent into cfgs and save them (in dependency_reflect_)
// for expansion by subsequent clauses. Return the accept result.
//
// On success the indexes pair is remembered in prefer_accept_ and the
// build system state is kept for reuse by a following reflect clause; on
// rejection the state is dropped.
//
// NOTE(review): has_alt is stored into config_variable_value::
// has_alternative for values set here -- presumably it indicates that the
// depends value has dependency alternatives; confirm against the caller.
//
// Throws failed on any build2 error (diagnostics already issued).
//
bool package_skeleton::
evaluate_prefer_accept (const dependency_configurations& cfgs,
                        const string& prefer,
                        const string& accept,
                        pair<size_t, size_t> indexes,
                        bool has_alt)
{
  size_t depends_index (indexes.first);

  // Clauses are evaluated in the depends value order.
  //
  assert (dependency_reflect_index_ <= depends_index);

  try
  {
    using namespace build2;
    using config::variable_origin;
    using build2::fail;
    using build2::info;
    using build2::endf;

    // Drop the state from the previous evaluation of prefer/accept.
    //
    if (prefer_accept_)
    {
      ctx_ = nullptr;
      prefer_accept_ = nullopt;
    }

    // Drop any dependency reflect values from the previous evaluation of
    // this clause, if any.
    //
    if (dependency_reflect_index_ == depends_index)
      dependency_reflect_.resize (dependency_reflect_pending_);

    // This is what needs to happen to the variables of different origins in
    // the passed dependency configurations:
    //
    // default             -- set as default (value::extra=1)
    // buildfile/dependent -- set as buildfile (value::extra=2)
    // override/user       -- set as override (so cannot be overridden)
    // undefined           -- ignored
    //
    // Note that we set value::extra to 2 for buildfile/dependent values.
    // This is done so that we can detect when they were set by this
    // dependent (even if to the same value). Note that the build2 config
    // module only treats 1 as the default value marker.
    //
    // Additionally, for all origins we need to typify the variables.
    //
    // All of this is done by load(), including removing and returning the
    // dependency variable prefixes (config.<project>) which we later add
    // to dependency_var_prefixes_.
    //
    strings dvps;
    scope& rs (load (cfgs, &dvps, true /* defaults */));

    // Evaluate the prefer clause.
    //
    {
      istringstream is (prefer);
      is.exceptions (istringstream::failbit | istringstream::badbit);

      path_name in ("<depends-prefer-clause>");
      uint64_t il (1);

      auto df = build2::make_diag_frame (
        [this, &prefer, depends_index] (const build2::diag_record& dr)
        {
          dr << info << "prefer clause:\n"
             << trim_right (string (prefer));

          // If an existing source directory has been specified, then we
          // have the manifest and so print the location of the depends
          // value in question.
          //
          if (src_root_specified_)
            depends_location (dr, src_root_ / manifest_file, depends_index);
          else
            dr << info << "in depends manifest value of package "
               << package.name;
        });

      lexer l (is, in, il /* start line */);
      buildfile_parser p (rs.ctx, dependency_var_prefixes_);
      p.parse_buildfile (l, &rs, rs);

      // Check if the dependent set any stray configuration variables.
      //
      for (size_t i (0); i != cfgs.size (); ++i)
      {
        package_configuration& cfg (cfgs[i]);

        const string& ns (dvps[i]); // Parallel.
        for (auto p (rs.vars.lookup_namespace (ns));
             p.first != p.second;
             ++p.first)
        {
          const variable& var (p.first->first);

          // This can be one of the overrides (__override, __prefix, etc),
          // which we skip.
          //
          if (var.override ())
            continue;

          if (cfg.find (var.name) == nullptr)
          {
            fail << "package " << cfg.package.name << " has no "
                 << "configuration variable " << var.name <<
              info << var.name << " set in prefer clause of dependent "
                 << package.string ();
          }
        }
      }
    }

    // Evaluate the accept condition.
    //
    bool r;
    {
      // Wrap the condition in parentheses so it parses as a single eval
      // context.
      //
      istringstream is ('(' + accept + ')');
      is.exceptions (istringstream::failbit | istringstream::badbit);

      path_name in ("<depends-accept-clause>");
      uint64_t il (1);

      auto df = build2::make_diag_frame (
        [this, &accept, depends_index] (const build2::diag_record& dr)
        {
          dr << info << "accept condition: (" << accept << ")";

          // If an existing source directory has been specified, then we
          // have the manifest and so print the location of the depends
          // value in question.
          //
          if (src_root_specified_)
            depends_location (dr, src_root_ / manifest_file, depends_index);
          else
            dr << info << "in depends manifest value of package "
               << package.name;
        });

      lexer l (is, in, il /* start line */);
      buildfile_parser p (rs.ctx, dependency_var_prefixes_);
      value v (p.parse_eval (l, rs, rs, parser::pattern_mode::expand));

      try
      {
        // Should evaluate to 'true' or 'false'.
        //
        r = build2::convert<bool> (move (v));
      }
      catch (const invalid_argument& e)
      {
        fail (build2::location (in, il)) << e << endf;
      }
    }

    // If acceptable, update the configuration with the new values, if any.
    //
    // We also save the subset of values that were set by this dependent to
    // be reflected to further clauses.
    //
    if (r)
    {
      dependency_reflect_index_ = depends_index;
      dependency_reflect_pending_ = dependency_reflect_.size ();

      for (size_t i (0); i != cfgs.size (); ++i)
      {
        package_configuration& cfg (cfgs[i]);

        const string& ns (dvps[i]);
        for (auto p (rs.vars.lookup_namespace (ns));
             p.first != p.second;
             ++p.first)
        {
          const variable& var (p.first->first);

          if (var.override ())
            continue;

          const value& val (p.first->second);

          // Classify the value's origin (override/buildfile/default) at
          // depth 1 of this lookup.
          //
          pair<variable_origin, lookup> ol (
            config::origin (rs,
                            var,
                            pair<lookup, size_t> {
                              lookup {val, var, rs.vars}, 1 /* depth */}));

          config_variable_value& v (*cfg.find (var.name));

          // An override cannot become a non-override. And a non-override
          // cannot become an override. Except that the dependency override
          // could be specified (only) for the dependent.
          //
          if (v.origin == variable_origin::override_)
          {
            assert (ol.first == variable_origin::override_);
          }
          else if (ol.first == variable_origin::override_ &&
                   v.origin != variable_origin::override_)
          {
            fail << "dependency override " << var.name << " specified for "
                 << "dependent " << package.string () << " but not dependency" <<
              info << "did you mean to specify ?" << cfg.package.name
                 << " +{ " << var.name << "=... }";
          }

          switch (ol.first)
          {
          case variable_origin::buildfile:
            {
              optional<names> ns (reverse_value (val));

              // If this value was set, save it as a dependency reflect.
              //
              if (val.extra == 0)
              {
                dependency_reflect_.push_back (
                  reflect_variable_value {v.name, ol.first, v.type, ns});
              }

              // Possible transitions:
              //
              // default/undefine -> buildfile -- override dependency default
              // buildfile -> buildfile       -- override other dependent
              //
              if (v.origin == variable_origin::buildfile)
              {
                // If unchanged, then we keep the old originating dependent
                // (even if the value was technically "overwritten" by this
                // dependent).
                //
                if (val.extra == 2 || v.value == ns)
                  break;
              }
              else
                v.origin = variable_origin::buildfile;

              v.value = move (ns);
              v.dependent = package; // We are the originating dependent.
              v.confirmed = true;
              v.has_alternative = has_alt;
              break;
            }
          case variable_origin::default_:
            {
              // A default can only come from a default.
              //
              assert (ol.first == v.origin);
              break;
            }
          case variable_origin::override_:
            {
              // If the value was set by this dependent then we need to
              // reflect it even if it was overridden (but as the overridden
              // value). Note that the mere presence of the value in rs.vars
              // is not enough to say that it was set -- it could also be
              // the default value. But we can detect that by examining
              // value::extra.
              //
              if (val.extra == 0)
              {
                dependency_reflect_.push_back (
                  reflect_variable_value {
                    v.name, ol.first, v.type, reverse_value (*ol.second)});
              }
              break;
            }
          case variable_origin::undefined:
            {
              // Not possible since we have the defined original.
              //
              assert (false);
              break;
            }
          }
        }
      }

      // Note that because we add it here, the following reflect clause will
      // not be able to expand undefined values. We handle this by keeping a
      // pending position.
      //
      dependency_var_prefixes_pending_ = dependency_var_prefixes_.size ();
      dependency_var_prefixes_.insert (dependency_var_prefixes_.end (),
                                       make_move_iterator (dvps.begin ()),
                                       make_move_iterator (dvps.end ()));

      // Note: do not drop the build system state yet since it should be
      // reused by the following reflect clause, if any.
      //
      prefer_accept_ = indexes;
    }
    else
      ctx_ = nullptr;

    return r;
  }
  catch (const build2::failed&)
  {
    throw failed (); // Assume the diagnostics has already been issued.
  }
}
+
// Parse and evaluate the require clause (a buildfile fragment that may
// only set bool dependency configuration variables and only to true) of
// the depends manifest value with index indexes.first against the
// dependency configurations in cfgs. Return true if the result is
// acceptable (no required variable is overridden to false by the
// user/dependent), in which case the values set are propagated into cfgs
// and saved for reflection by subsequent clauses.
//
// NOTE(review): has_alt is stored into config_variable_value::
// has_alternative for values set here -- presumably it indicates that the
// depends value has dependency alternatives; confirm against the caller.
//
// Throws failed on any build2 error (diagnostics already issued).
//
bool package_skeleton::
evaluate_require (const dependency_configurations& cfgs,
                  const string& require,
                  pair<size_t, size_t> indexes,
                  bool has_alt)
{
  size_t depends_index (indexes.first);

  // Clauses are evaluated in the depends value order.
  //
  assert (dependency_reflect_index_ <= depends_index);

  try
  {
    using namespace build2;
    using config::variable_origin;
    using build2::fail;
    using build2::info;
    using build2::endf;

    // Drop the state from the previous evaluation of prefer/accept.
    //
    if (prefer_accept_)
    {
      ctx_ = nullptr;
      prefer_accept_ = nullopt;
    }

    // Drop any dependency reflect values from the previous evaluation of
    // this clause, if any.
    //
    if (dependency_reflect_index_ == depends_index)
      dependency_reflect_.resize (dependency_reflect_pending_);

    // A require clause can only set bool configuration variables and only
    // to true and may not have any conditions on other configuration
    // variables (including their origin). As a result, we don't need to set
    // the default (or other dependent) values, but will need the type
    // information as well as overrides. Except that the type information
    // may not be available for system packages, so we must deal with
    // that. See negotiate_configuration() for details.
    //
    strings dvps;
    scope& rs (load (cfgs, &dvps, false /* defaults */));

    // Evaluate the require clause.
    //
    {
      istringstream is (require);
      is.exceptions (istringstream::failbit | istringstream::badbit);

      path_name in ("<depends-require-clause>");
      uint64_t il (1);

      auto df = build2::make_diag_frame (
        [this, &require, depends_index] (const build2::diag_record& dr)
        {
          dr << info << "require clause:\n"
             << trim_right (string (require));

          // If an existing source directory has been specified, then we
          // have the manifest and so print the location of the depends
          // value in question.
          //
          if (src_root_specified_)
            depends_location (dr, src_root_ / manifest_file, depends_index);
          else
            dr << info << "in depends manifest value of package "
               << package.name;
        });

      lexer l (is, in, il /* start line */);
      buildfile_parser p (rs.ctx, dependency_var_prefixes_);
      p.parse_buildfile (l, &rs, rs);

      // Check for stray variables and enforce all the require restrictions
      // (bool, set to true, etc).
      //
      for (size_t i (0); i != cfgs.size (); ++i)
      {
        package_configuration& cfg (cfgs[i]);

        const string& ns (dvps[i]); // Parallel.
        for (auto p (rs.vars.lookup_namespace (ns));
             p.first != p.second;
             ++p.first)
        {
          // Note that because we didn't set any default (or other
          // dependent) values, all the values we see are set by this
          // dependent.
          //
          const variable& var (p.first->first);

          // This can be one of the overrides (__override, __prefix, etc),
          // which we skip.
          //
          if (var.override ())
            continue;

          const value& val (p.first->second);

          // Deal with a system package that has no type information.
          //
          if (!cfg.system)
          {
            const config_variable_value* v (cfg.find (var.name));

            if (v == nullptr)
            {
              fail << "package " << cfg.package.name << " has no "
                   << "configuration variable " << var.name <<
                info << var.name << " set in require clause of dependent "
                   << package.string ();
            }

            if (!v->type || *v->type != "bool")
            {
              fail << "configuration variable " << var.name << " is not of "
                   << "bool type" <<
                info << var.name << " set in require clause of dependent "
                   << package.string ();
            }
          }

          // For a system package the value is untyped, so convert manually
          // treating a failed conversion as false.
          //
          bool r;
          if (cfg.system)
          {
            try
            {
              r = build2::convert<bool> (val);
            }
            catch (const invalid_argument&)
            {
              r = false;
            }
          }
          else
            r = cast_false<bool> (val);

          if (!r)
          {
            fail << "configuration variable " << var.name << " is not set "
                 << "to true" <<
              info << var.name << " set in require clause of dependent "
                 << package.string ();
          }
        }
      }
    }

    // First determine if acceptable.
    //
    bool r (true);
    for (size_t i (0); i != cfgs.size (); ++i)
    {
      package_configuration& cfg (cfgs[i]);

      const string& ns (dvps[i]);
      for (auto p (rs.vars.lookup_namespace (ns));
           p.first != p.second;
           ++p.first)
      {
        const variable& var (p.first->first);

        if (var.override ())
          continue;

        const value& val (p.first->second);

        // Note: could be NULL if cfg.system.
        //
        const config_variable_value* v (cfg.find (var.name));

        // The only situation where the result would not be acceptable is if
        // one of the values were overridden to false.
        //
        pair<variable_origin, lookup> ol (
          config::origin (rs,
                          var,
                          pair<lookup, size_t> {
                            lookup {val, var, rs.vars}, 1 /* depth */}));

        // An override cannot become a non-override. And a non-override
        // cannot become an override. Except that the dependency override
        // could be specified (only) for the dependent.
        //
        if (v != nullptr && v->origin == variable_origin::override_)
        {
          assert (ol.first == variable_origin::override_);
        }
        else if (ol.first == variable_origin::override_ &&
                 (v == nullptr || v->origin != variable_origin::override_))
        {
          fail << "dependency override " << var.name << " specified for "
               << "dependent " << package.string () << " but not dependency" <<
            info << "did you mean to specify ?" << cfg.package.name
               << " +{ " << var.name << "=... }";
        }

        if (ol.first == variable_origin::override_)
        {
          if (cfg.system)
          {
            try
            {
              if (!build2::convert<bool> (*ol.second))
                r = false;
            }
            catch (const invalid_argument&)
            {
              r = false;
            }
          }
          else
          {
            if (!cast_false<bool> (*ol.second))
              r = false;
          }
        }
      }
    }

    // If acceptable, update the configuration with the new values, if any.
    //
    // Note that we cannot easily combine this loop with the above because
    // we should not modify configurations if the result is not acceptable.
    //
    // We also save the subset of values that were set by this dependent to
    // be reflected to further clauses.
    //
    if (r)
    {
      dependency_reflect_index_ = depends_index;
      dependency_reflect_pending_ = dependency_reflect_.size ();

      for (size_t i (0); i != cfgs.size (); ++i)
      {
        package_configuration& cfg (cfgs[i]);

        const string& ns (dvps[i]);
        for (auto p (rs.vars.lookup_namespace (ns));
             p.first != p.second;
             ++p.first)
        {
          const variable& var (p.first->first);

          if (var.override ())
            continue;

          config_variable_value* v (cfg.find (var.name));

          if (v == nullptr) // cfg.system
          {
            cfg.push_back (config_variable_value {var.name,
                                                  variable_origin::undefined,
                                                  {},
                                                  {},
                                                  {},
                                                  false,
                                                  false});

            v = &cfg.back ();
          }

          // This value was set so save it as a dependency reflect.
          //
          // Note that unlike the equivalent evaluate_prefer_accept() logic,
          // here the value cannot be the default/buildfile (since we don't
          // set those; see the load() call above).
          //
          optional<names> ns (names {name ("true")});

          // Note: force bool type if system.
          //
          dependency_reflect_.push_back (
            reflect_variable_value {
              v->name,
              (v->origin == variable_origin::override_
               ? v->origin
               : variable_origin::buildfile),
              cfg.system ? optional<string> ("bool") : v->type,
              ns});

          if (v->origin != variable_origin::override_)
          {
            // Possible transitions:
            //
            // default/undefine -> buildfile -- override dependency default
            // buildfile -> buildfile       -- override other dependent
            //

            if (v->origin == variable_origin::buildfile)
            {
              // If unchanged, then we keep the old originating dependent
              // (even if the value was technically "overwritten" by this
              // dependent).
              //
              if (v->value == ns)
                continue;
            }
            else
              v->origin = variable_origin::buildfile;

            v->value = move (ns);
            v->dependent = package; // We are the originating dependent.
            v->confirmed = true;
            v->has_alternative = has_alt;
          }
        }
      }

      dependency_var_prefixes_.insert (dependency_var_prefixes_.end (),
                                       make_move_iterator (dvps.begin ()),
                                       make_move_iterator (dvps.end ()));
    }

    // Drop the build system state since it needs reloading (while it may
    // seem safe for us to keep the state since we didn't set any defaults,
    // we may have overrides that the clause did not set, so let's drop it
    // for good measure and also to keep things simple).
    //
    ctx_ = nullptr;

    return r;
  }
  catch (const build2::failed&)
  {
    throw failed (); // Assume the diagnostics has already been issued.
  }
}
+
+ bool package_skeleton::
+ empty_print ()
+ {
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+
+ return (dependent_vars_.empty () &&
+ reflect_.empty () &&
+ find_if (config_vars_.begin (), config_vars_.end (),
+ [this] (const string& v)
+ {
+ // See print_config() for details.
+ //
+ size_t vn;
+ if (project_override (v, var_prefix_, &vn))
+ {
+ if (!develop_)
+ {
+ size_t pn (var_prefix_.size ());
+ if (v.compare (pn, vn - pn, ".develop") == 0)
+ return false;
+ }
+
+ return true;
+ }
+ return false;
+ }) == config_vars_.end ());
+ }
+
// Print the package configuration to os, one variable per line prefixed
// with indent: first the user-specified project variables (minus a
// suppressed config.<project>.develop), then the dependent-set variables,
// and finally the non-overridden reflect variables, each annotated with
// its source.
//
void package_skeleton::
print_config (ostream& os, const char* indent)
{
  using build2::config::variable_origin;

  if (!loaded_old_config_)
    load_old_config_impl ();

  // Print a line, emitting a newline separator before every line except
  // the first.
  //
  auto print = [&os,
                indent,
                first = true] (const string& v) mutable -> ostream&
  {
    if (first)
      first = false;
    else
      os << '\n';

    os << indent << v;
    return os;
  };

  // NOTE: see also empty_print() if changing anything here.

  // @@ TODO: we could have added the package itself to "set by ..."
  //    for overridden (to the same value) reflect. But then why not
  //    do the same for dependent that is overridden by user (we could
  //    have kept them as reflect_variable_values rather than strings)?
  //    Maybe one day.

  // First comes the user configuration.
  //
  for (size_t i (0); i != config_vars_.size (); ++i)
  {
    const string& v (config_vars_[i]);

    size_t vn;
    if (project_override (v, var_prefix_, &vn))
    {
      // To reduce the noise (e.g., during bdep-init), skip
      // config.<project>.develop if the package doesn't use it.
      //
      if (!develop_)
      {
        size_t pn (var_prefix_.size ());
        if (v.compare (pn, vn - pn, ".develop") == 0)
          continue;
      }

      const char* s (nullptr);

      switch (config_var_srcs_[i])
      {
      case config_source::user:      s = "user";      break;
      case config_source::dependent: s = "dependent"; break;
      case config_source::reflect: assert (false); // Must never be loaded.
      }

      print (v) << " (" << (system ? "expected " : "")
                << s << " configuration)";
    }
  }

  // Next dependent configuration.
  //
  for (size_t i (0); i != dependent_vars_.size (); ++i)
  {
    const string& v (dependent_vars_[i]);
    const package_key& d (dependent_orgs_[i]); // Parallel.

    print (v) << " (" << (system ? "expected" : "set") << " by "
              << d << ')';
  }

  // Finally reflect (but skip overridden).
  //
  for (const reflect_variable_value& v: reflect_)
  {
    if (v.origin == variable_origin::override_)
      continue;

    string s (serialize_cmdline (v.name, v.value));
    print (s) << " (" << (system ? "expected" : "set") << " by "
              << package.name << ')';
  }
}
+
+ void package_skeleton::
+ load_old_config ()
+ {
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+ }
+
// Collect the final package configuration: return the merged list of
// configuration variable overrides (user, then dependent, then reflect,
// in that order) together with the per-variable sources. This is
// destructive (note the rvalue ref-qualifier): the *_vars_ members are
// moved from and db_ is reset, so it must be called at most once.
//
pair<strings, vector<config_variable>> package_skeleton::
collect_config () &&
{
  // NOTE: remember to update config_checksum() if changing anything here.

  assert (db_ != nullptr); // Must be called only once.

  using build2::config::variable_origin;

  if (!loaded_old_config_)
    load_old_config_impl ();

  // Merge all the variables into a single list in the correct order
  // and assign their sources while at it.
  //
  strings vars;
  vector<config_variable> srcs;

  if (size_t n = (config_vars_.size () +
                  dependent_vars_.size () +
                  reflect_.size ()))
  {
    // For vars we will steal the first non-empty *_vars_. But for sources
    // reserve the space.
    //
    srcs.reserve (n); // At most that many.

    // Return the variable name given the variable override.
    //
    auto var_name = [] (const string& v)
    {
      size_t p (v.find_first_of ("=+ \t"));
      assert (p != string::npos);
      return string (v, 0, p);
    };

    // Note that we assume the three sets of variables do not clash.
    //

    // First comes the user configuration.
    //
    if (!config_vars_.empty ())
    {
      // Assign the user source only to user-specified configuration
      // variables which are project variables (i.e., names start with
      // config.<project>).
      //
      size_t pn (var_prefix_.size ());
      for (const string& v: config_vars_)
      {
        size_t vn;
        if (project_override (v, var_prefix_, &vn))
        {
          // Skip config.<project>.develop (can potentially be passed by
          // bdep-init) if the package doesn't use it.
          //
          if (!develop_ && v.compare (pn, vn - pn, ".develop") == 0)
            continue;

          string n (v, 0, vn);

          // Check for a duplicate.
          //
          auto i (find_if (srcs.begin (), srcs.end (),
                           [&n] (const config_variable& cv)
                           {
                             return cv.name == n;
                           }));

          if (i == srcs.end ())
            srcs.push_back (config_variable {move (n), config_source::user});
        }
      }

      vars = move (config_vars_);
    }

    // Next dependent configuration.
    //
    if (!dependent_vars_.empty ())
    {
      // These are all project variables. There should also be no duplicates
      // by construction.
      //
      for (const string& v: dependent_vars_)
        srcs.push_back (
          config_variable {var_name (v), config_source::dependent});

      if (vars.empty ())
        vars = move (dependent_vars_);
      else
      {
        vars.reserve (n);
        vars.insert (vars.end (),
                     make_move_iterator (dependent_vars_.begin ()),
                     make_move_iterator (dependent_vars_.end ()));
      }
    }

    // Finally reflect.
    //
    if (!reflect_.empty ())
    {
      vars.reserve (n);

      // These are all project variables. There should also be no duplicates
      // by construction (see evaluate_reflect()).
      //
      for (const reflect_variable_value& v: reflect_)
      {
        if (v.origin == variable_origin::override_)
          continue;

        vars.push_back (serialize_cmdline (v.name, v.value));
        srcs.push_back (config_variable {v.name, config_source::reflect});
      }
    }
  }

  ctx_ = nullptr; // Free.
  db_ = nullptr;

  return make_pair (move (vars), move (srcs));
}
+
+ string package_skeleton::
+ config_checksum ()
+ {
+ // Note: this is parallel to collect_config() logic but is not destructive.
+
+ assert (db_ != nullptr); // Must be called before collect_config().
+
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+
+ sha256 cs;
+
+ if (!config_vars_.empty ())
+ {
+ cstrings vs;
+ size_t pn (var_prefix_.size ());
+ for (const string& v: config_vars_)
+ {
+ size_t vn;
+ if (project_override (v, var_prefix_, &vn))
+ {
+ // Skip config.<project>.develop (can potentially be passed by
+ // bdep-init) if the package doesn't use it.
+ //
+ if (develop_ || v.compare (pn, vn - pn, ".develop") != 0)
+ cs.append (v);
+ }
+ }
+ }
+
+ if (!dependent_vars_.empty ())
+ {
+ for (const string& v: dependent_vars_)
+ cs.append (v);
+ }
+
+ if (!reflect_.empty ())
+ {
+ for (const reflect_variable_value& v: reflect_)
+ {
+ if (v.origin != build2::config::variable_origin::override_)
+ cs.append (serialize_cmdline (v.name, v.value));
+ }
+ }
+
+ return !cs.empty () ? cs.string () : string ();
+ }
+
// Merge the build2 command line variable overrides, the user
// configuration, and the passed dependent/dependency variables into
// cmd_vars_ (prefixed with the disfigure directive) and return it. If
// cache is true and a cached result exists, return that instead; the
// cache flag is recorded so a subsequent non-cached call recomputes.
//
const strings& package_skeleton::
merge_cmd_vars (const strings& dependent_vars,
                const strings& dependency_vars,
                bool cache)
{
  // Merge variable overrides (note that the order is important). See also a
  // custom/optimized version in load_old_config_impl().
  //
  if (!cache || !cmd_vars_cache_)
  {
    const strings& vs1 (build2_cmd_vars);
    const strings& vs2 (config_vars_);
    const strings& vs3 (dependent_vars);  // Should not override.
    const strings& vs4 (dependency_vars); // Should not override.

    // Try to reuse both vector and string buffers.
    //
    cmd_vars_.resize (
      1 + vs1.size () + vs2.size () + vs3.size () + vs4.size ());

    size_t i (0);
    {
      string& v (cmd_vars_[i++]);

      // If the package is being disfigured, then don't load config.build at
      // all. Otherwise, disfigure all package variables (config.<name>**).
      //
      // Note that this semantics must be consistent with how we actually
      // configure the package in pkg_configure().
      //
      if (disfigure_)
        v = "config.config.unload=true";
      else
      {
        // Note: must be quoted to preserve the pattern.
        //
        v = "config.config.disfigure='config.";
        v += package.name.variable ();
        v += "**'";
      }
    }

    for (const string& v: vs1) cmd_vars_[i++] = v;
    for (const string& v: vs2) cmd_vars_[i++] = v;
    for (const string& v: vs3) cmd_vars_[i++] = v;
    for (const string& v: vs4) cmd_vars_[i++] = v;

    cmd_vars_cache_ = cache;
  }

  return cmd_vars_;
}
+
// Bootstrap the package's build system skeleton (without disfiguring) and
// load its root.build in order to (1) detect whether the package defines
// config.<project>.develop and (2) extract the old user/dependent
// configuration from config.build (per load_config_flags) and merge it
// into config_vars_/config_var_srcs_. Sets loaded_old_config_ and drops
// the build system context on completion.
//
void package_skeleton::
load_old_config_impl ()
{
  assert (!loaded_old_config_ && ctx_ == nullptr);

  try
  {
    using namespace build2;
    using build2::info;

    // This load must be done without config.config.disfigure. Also, it
    // would be nice to optimize for the common case where the only load is
    // to get the old configuration (e.g., config.*.develop) as part of
    // collect_config(). So instead of calling merge_cmd_vars() we will do
    // our own (but consistent) thing.
    //
    const strings* cmd_vars (nullptr);
    {
      assert (!cmd_vars_cache_); // Sanity check (we are always first).

      const strings& vs1 (build2_cmd_vars);
      const strings& vs2 (config_vars_);

      // If one of the two lists is empty we can use the other as is
      // (avoiding a merge into cmd_vars_).
      //
      if (!disfigure_)
        cmd_vars = (vs2.empty () ? &vs1 : vs1.empty () ? &vs2 : nullptr);

      if (cmd_vars == nullptr)
      {
        // Note: the order is important (see merge_cmd_vars()).
        //
        cmd_vars_.reserve ((disfigure_ ? 1 : 0) + vs1.size () + vs2.size ());

        // If the package is being disfigured, then don't load config.build
        // at all.
        //
        if (disfigure_)
          cmd_vars_.push_back ("config.config.unload=true");

        cmd_vars_.insert (cmd_vars_.end (), vs1.begin (), vs1.end ());
        cmd_vars_.insert (cmd_vars_.end (), vs2.begin (), vs2.end ());

        cmd_vars = &cmd_vars_;
      }
    }

    scope* rs;
    {
      auto df = build2::make_diag_frame (
        [this] (const build2::diag_record& dr)
        {
          dr << info << "while loading build system skeleton of package "
             << package.name;
        });

      rs = bootstrap (*this, *cmd_vars, true /* old */)->second.front ();

      // Load project's root.build.
      //
      load_root (*rs);
    }

    // Note: go straight for the public variable pool.
    //
    if (const variable* var = rs->ctx.var_pool.find (var_prefix_ + ".develop"))
    {
      // Use the fact that the variable is typed as a proxy for it being
      // defined with config directive (the more accurate way would be via
      // the config module's saved variables map).
      //
      develop_ = (var->type != nullptr);
    }

    // @@ TODO: should we also verify user-specified project configuration
    //    variables are not bogus? But they could be untyped...
    //
    //    Also, build2 warns about unused variables being dropped.
    //
    //    Note that currently load_old_config_impl() is disabled unless
    //    there is a config.*.develop variable or we were asked to load
    //    dependent configuration; see package_skeleton ctor.

    // Extract and merge old user and/or dependent configuration variables
    // from config.build (or equivalent) into config_vars.
    //
    if (config_srcs_ != nullptr)
    {
      assert (!disfigure_);

      auto i (config_vars_.begin ());     // Insert position, see below.
      auto j (config_var_srcs_.begin ()); // Insert position, see below.

      names storage;
      for (const config_variable& v: *config_srcs_)
      {
        // Only consider sources enabled by load_config_flags.
        //
        if (!(((load_config_flags & load_config_user) != 0 &&
               v.source == config_source::user) ||
              ((load_config_flags & load_config_dependent) != 0 &&
               v.source == config_source::dependent)))
          continue;

        using config::variable_origin;

        pair<variable_origin, lookup> ol (config::origin (*rs, v.name));

        switch (ol.first)
        {
        case variable_origin::override_:
          {
            // Already in config_vars.
            //
            // @@ TODO: theoretically, this could be an append/prepend
            //    override(s) and to make this work correctly we would need
            //    to replace them with an assign override with the final
            //    value. Maybe one day.
            //
            break;
          }
        case variable_origin::buildfile:
          {
            // Doesn't really matter where we add them though conceptually
            // feels like old should go before new (and in the original
            // order).
            //
            i = config_vars_.insert (
              i,
              serialize_cmdline (v.name, *ol.second, storage)) + 1;

            j = config_var_srcs_.insert (j, v.source) + 1;
            break;
          }
        case variable_origin::undefined:
        case variable_origin::default_:
          {
            // Old user configuration no longer in config.build. We could
            // complain but that feels overly drastic. Seeing that we will
            // recalculate the new set of config variable sources, let's
            // just ignore this (we could issue a warning, but who knows how
            // many times it will be issued with all this backtracking).
            //
            break;
          }
        }
      }
    }

    loaded_old_config_ = true;

    if (old_src_root_.empty ())
      verified_ = true; // Managed to load without errors.

    ctx_ = nullptr;
  }
  catch (const build2::failed&)
  {
    throw failed (); // Assume the diagnostics has already been issued.
  }
}
+
+ build2::scope& package_skeleton::
+ load (const dependency_configurations& cfgs, strings* dvps, bool defaults)
+ {
+ if (ctx_ != nullptr)
+ {
+ // We have to reload if there is any dependency configuration.
+ //
+ if (cfgs.empty ())
+ return *rs_;
+
+ ctx_ = nullptr;
+ }
+
+ if (!loaded_old_config_)
+ load_old_config_impl ();
+
+ try
+ {
+ using namespace build2;
+ using build2::info;
+ using build2::config::variable_origin;
+
+ // If we have any dependency configurations, then here we need to add
+ // dependency configuration variables with the override origin to the
+ // command line overrides (see evaluate_prefer_accept() for details).
+ // While at it, handle dependency variable prefixes.
+ //
+ strings dependency_vars;
+ for (const package_configuration& cfg: cfgs)
+ {
+ for (const config_variable_value& v: cfg)
+ {
+ if (v.origin == variable_origin::override_)
+ dependency_vars.push_back (v.serialize_cmdline ());
+ }
+
+ string p ("config." + cfg.package.name.variable ());
+
+ auto i (find (dependency_var_prefixes_.begin (),
+ dependency_var_prefixes_.end (),
+ p));
+ if (i != dependency_var_prefixes_.end ())
+ dependency_var_prefixes_.erase (i);
+
+ dvps->push_back (move (p));
+ }
+
+ // If there aren't any, then we can reuse already merged cmd_vars (they
+ // don't change during evaluate_*() calls except for the dependency
+ // overrides case).
+ //
+ const strings& cmd_vars (
+ merge_cmd_vars (dependent_vars_,
+ dependency_vars,
+ dependency_vars.empty () /* cache */));
+
+ auto df = build2::make_diag_frame (
+ [this] (const build2::diag_record& dr)
+ {
+ dr << info << "while loading build system skeleton of package "
+ << package.name;
+ });
+
+ auto rsi (bootstrap (*this, cmd_vars));
+ scope& rs (*rsi->second.front ());
+
+ // Load project's root.build as well as potentially accumulated reflect
+ // variables.
+ //
+ // If we have the accumulated reflect variables, wedge them just before
+ // loading root.build (but after initializing config which may load
+ // config.build and which we wish to override).
+ //
+ // Note that the plan for non-external packages is to extract the
+ // configuration and then load it with config.config.load and this
+ // approach should work for that case too.
+ //
+ // This is also where we set dependency configuration variables with the
+ // default and buildfile origins and typify all dependency variables
+ // (see evaluate_prefer_accept() for details).
+ //
+ function<void (parser&)> pre;
+
+ struct data
+ {
+ scope& rs;
+ const dependency_configurations& cfgs;
+ bool defaults;
+ } d {rs, cfgs, defaults};
+
+ if (!cfgs.empty () ||
+ !reflect_.empty () ||
+ !dependency_reflect_.empty ())
+ {
+ pre = [this, &d] (parser&)
+ {
+ scope& rs (d.rs);
+
+ auto insert_var = [&rs] (const string& name,
+ const optional<string>& type)
+ -> const variable&
+ {
+ const value_type* vt (nullptr);
+ if (type)
+ {
+ vt = parser::find_value_type (&rs, *type);
+ assert (vt != nullptr);
+ }
+
+ // Note: go straight for the public variable pool.
+ //
+ return rs.var_pool (true /* public */).insert (name, vt);
+ };
+
+ for (const reflect_variable_value& v: reflect_)
+ {
+ if (v.origin == variable_origin::override_)
+ continue;
+
+ const variable& var (insert_var (v.name, v.type)); // Note: untyped.
+ value& val (rs.assign (var));
+
+ if (v.value)
+ val.assign (names (*v.value), &var);
+ else
+ val = nullptr;
+ }
+
+ // Note that for now we don't bother setting overridden reflect
+ // values as overrides. It seems the only reason to go through the
+ // trouble would be to get the accurate $origin() result. But basing
+ // any decisions on whether the reflect value was overridden or not
+ // seems far fetched.
+ //
+ for (const reflect_variable_value& v: dependency_reflect_)
+ {
+ const variable& var (insert_var (v.name, v.type));
+ value& val (rs.assign (var));
+
+ if (v.value)
+ val.assign (names (*v.value), &var);
+ else
+ val = nullptr;
+ }
+
+ for (const package_configuration& cfg: d.cfgs)
+ {
+ for (const config_variable_value& v: cfg)
+ {
+ const variable& var (insert_var (v.name, v.type));
+
+ switch (v.origin)
+ {
+ case variable_origin::default_:
+ case variable_origin::buildfile:
+ {
+ if (d.defaults)
+ {
+ auto& val (
+ static_cast<variable_map::value_data&> (
+ rs.assign (var)));
+
+ if (v.value)
+ val.assign (names (*v.value), &var);
+ else
+ val = nullptr;
+
+ val.extra = v.origin == variable_origin::default_ ? 1 : 2;
+ }
+ break;
+ }
+ case variable_origin::undefined:
+ case variable_origin::override_: break;
+ }
+ }
+ }
+ };
+ }
+
+ load_root (rs, pre);
+
+ setup_base (rsi,
+ out_root_.empty () ? src_root_ : out_root_,
+ src_root_);
+
+ rs_ = &rs;
+ return rs;
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ // Create the build context.
+ //
+ static void
+ create_context (package_skeleton& skl, const strings& cmd_vars)
+ {
+ assert (skl.db_ != nullptr && skl.ctx_ == nullptr);
+
+ // Initialize the build system.
+ //
+ if (!build2_sched.started ())
+ build2_init (*skl.co_);
+
+ try
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::endf;
+
+ // Create build context.
+ //
+ skl.ctx_.reset (
+ new context (build2_sched,
+ build2_mutexes,
+ build2_fcache,
+ nullopt /* match_only */, // Shouldn't matter.
+ false /* no_external_modules */,
+ false /* dry_run */, // Shouldn't matter.
+ false /* no_diag_buffer */, // Shouldn't matter.
+ false /* keep_going */, // Shouldn't matter.
+ cmd_vars));
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
+ // Bootstrap the package skeleton.
+ //
+ static build2::scope_map::iterator
+ bootstrap (package_skeleton& skl, const strings& cmd_vars, bool old)
+ {
+ assert (skl.db_ != nullptr &&
+ skl.ctx_ == nullptr &&
+ skl.available != nullptr);
+
+ // The overall plan is as follows:
+ //
+ // 0. Create filesystem state if necessary (could have been created by
+ // another instance, e.g., during simulation).
+ //
+ // 1. Bootstrap the package skeleton.
+ //
+ // Creating a new context is not exactly cheap (~1.2ms debug, 0.08ms
+ // release) so we could try to re-use it by cleaning all the scopes other
+ // than the global scope (and probably some other places, like var pool).
+ // But we will need to carefully audit everything to make sure we don't
+ // miss anything (like absolute scope variable overrides being lost). So
+ // maybe, one day, if this really turns out to be a performance issue.
+
+ // Create the skeleton filesystem state, if it doesn't exist yet.
+ //
+ if (old && skl.old_src_root_.empty ())
+ old = false;
+
+ dir_path& skl_src_root (old ? skl.old_src_root_ : skl.src_root_);
+ dir_path& skl_out_root (old ? skl.old_out_root_ : skl.out_root_);
+
+ if (!skl.created_)
+ {
+ const available_package& ap (*skl.available);
+
+ // Note that we create the skeleton directories in the skeletons/
+ // subdirectory of the configuration temporary directory to make sure
+ // they never clash with other temporary subdirectories (git
+ // repositories, etc).
+ //
+ // Note: for old src/out, everything should already exist.
+ //
+ if (!old && (skl_src_root.empty () || skl_out_root.empty ()))
+ {
+ // Cannot be specified if src_root_ is unspecified.
+ //
+ assert (skl_out_root.empty ());
+
+ // Note that only configurations which can be used as repository
+ // information sources have the temporary directory facility
+ // pre-initialized (see pkg-build.cxx for details). Thus, we may need
+ // to initialize it ourselves.
+ //
+ const dir_path& c (skl.db_->config_orig);
+ auto i (tmp_dirs.find (c));
+
+ if (i == tmp_dirs.end ())
+ {
+ init_tmp (c);
+
+ i = tmp_dirs.find (c);
+ }
+
+ // Make sure the source and out root directories, if set, are absolute
+ // and normalized.
+ //
+ // Note: can never fail since the temporary directory should already
+ // be created and so its path should be valid.
+ //
+ dir_path d (normalize (i->second, "temporary directory"));
+
+ d /= "skeletons";
+ d /= skl.package.name.string () + '-' + ap.version.string ();
+
+ if (skl_src_root.empty ())
+ skl_src_root = move (d); // out_root_ is the same.
+ else
+ skl_out_root = move (d); // Don't even need to create it.
+ }
+
+ if (!exists (skl_src_root))
+ {
+ assert (!old); // An old package version must already exist on disk.
+
+ // Create the buildfiles.
+ //
+ // Note that it probably doesn't matter which naming scheme to use
+ // for the buildfiles, unless in the future we allow specifying
+ // additional files.
+ //
+ {
+ bool an (*ap.alt_naming);
+
+ path bf (skl_src_root /
+ (an ? alt_bootstrap_file : std_bootstrap_file));
+
+ mk_p (bf.directory ());
+
+ // Save the {bootstrap,root}.build files.
+ //
+ auto save = [] (const string& s, const path& f)
+ {
+ try
+ {
+ ofdstream os (f);
+ os << s;
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << f << ": " << e;
+ }
+ };
+
+ save (*ap.bootstrap_build, bf);
+
+ if (ap.root_build)
+ save (*ap.root_build,
+ skl_src_root / (an ? alt_root_file : std_root_file));
+
+ for (const buildfile& f: ap.buildfiles)
+ {
+ path p (skl_src_root /
+ (an ? alt_build_dir : std_build_dir) /
+ f.path);
+
+ p += ".";
+ p += (an ? alt_build_ext : std_build_ext);
+
+ mk_p (p.directory ());
+
+ save (f.content, p);
+ }
+ }
+
+ // Create the manifest file containing the bare minimum of values
+ // which can potentially be required to load the build system state
+ // (i.e., either via the version module or manual version extraction).
+ //
+ {
+ package_manifest m;
+ m.name = skl.package.name;
+ m.version = ap.version;
+
+ // Note that there is no guarantee that the potential build2
+ // constraint has already been verified. Thus, we also serialize the
+ // build2 dependency value, letting the version module verify the
+ // constraint.
+ //
+ // Also note that the resulting file is not quite a valid package
+ // manifest, since it doesn't contain all the required values
+ // (summary, etc). It, however, is good enough for build2 which
+ // doesn't perform exhaustive manifest validation.
+ //
+ m.dependencies.reserve (ap.dependencies.size ());
+ for (const dependency_alternatives_ex& das: ap.dependencies)
+ {
+ // Skip the special (inverse) test dependencies.
+ //
+ if (!das.type)
+ m.dependencies.push_back (das);
+ }
+
+ path mf (skl_src_root / manifest_file);
+
+ try
+ {
+ ofdstream os (mf);
+ manifest_serializer s (os, mf.string ());
+ m.serialize (s);
+ os.close ();
+ }
+ catch (const manifest_serialization& e)
+ {
+ // We shouldn't be creating a non-serializable manifest, since
+ // it's crafted from the parsed values.
+ //
+ assert (false);
+
+ fail << "unable to serialize " << mf << ": " << e.description;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << mf << ": " << e;
+ }
+ }
+ }
+
+ if (!old)
+ skl.created_ = true;
+ }
+
+ try
+ {
+ using namespace build2;
+ using build2::fail;
+ using build2::endf;
+
+ // Create the build context.
+ //
+ create_context (skl, cmd_vars);
+ context& ctx (*skl.ctx_);
+
+ // This is essentially a subset of the steps we perform in b.cxx. See
+ // there for more detailed comments.
+ //
+ scope& gs (ctx.global_scope.rw ());
+
+ const meta_operation_info& mif (mo_perform);
+ const operation_info& oif (op_update);
+
+ ctx.current_mname = mif.name;
+ ctx.current_oname = oif.name;
+
+ gs.assign (ctx.var_build_meta_operation) = ctx.current_mname;
+
+ // Use the build mode to signal this is a package skeleton load.
+ //
+ gs.assign (*ctx.var_pool.find ("build.mode")) = "skeleton";
+
+ // Note that it's ok for out_root to not exist (external package).
+ //
+ const dir_path& src_root (skl_src_root);
+ const dir_path& out_root (skl_out_root.empty ()
+ ? skl_src_root
+ : skl_out_root);
+
+ auto rsi (create_root (ctx, out_root, src_root));
+ scope& rs (*rsi->second.front ());
+
+ // Note: we know this project hasn't yet been bootstrapped.
+ //
+ optional<bool> altn;
+ value& v (bootstrap_out (rs, altn));
+
+ if (!v)
+ v = src_root;
+ else
+ {
+ // If the package directory was moved, then it's possible we will have
+ // src-root.build with an old src_root value. Presumably this will
+ // cause the package to be re-configured and so ignoring the old value
+ // here should be ok. Note that the outdated src-root.build can also
+ // mess up subproject discovery in create_bootstrap_outer() but we
+ // omit that part.
+ //
+ if (cast<dir_path> (v) != src_root)
+ v = src_root;
+ }
+
+ setup_root (rs, false /* forwarded */);
+
+ bootstrap_pre (rs, altn);
+ bootstrap_src (rs, altn,
+ skl.db_->config.relative (out_root) /* amalgamation */,
+ false /* subprojects */);
+
+
+ // Omit discovering amalgamation's subprojects (i.e., all the packages
+ // in the configuration). Besides being a performance optimization, this
+ // also sidesteps the issue of outdated src-root.build (see above).
+ //
+ create_bootstrap_outer (rs, false /* subprojects */);
+ bootstrap_post (rs);
+
+ assert (mif.meta_operation_pre == nullptr);
+ ctx.current_meta_operation (mif);
+
+ ctx.enter_project_overrides (rs, out_root, ctx.var_overrides);
+
+ return rsi;
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+}
diff --git a/bpkg/package-skeleton.hxx b/bpkg/package-skeleton.hxx
new file mode 100644
index 0000000..947522e
--- /dev/null
+++ b/bpkg/package-skeleton.hxx
@@ -0,0 +1,400 @@
+// file : bpkg/package-skeleton.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_PACKAGE_SKELETON_HXX
+#define BPKG_PACKAGE_SKELETON_HXX
+
+#include <libbuild2/forward.hxx>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-configuration.hxx>
+#include <bpkg/common-options.hxx>
+
+namespace bpkg
+{
+ // A build system skeleton of a package used to evaluate buildfile clauses
+ // during dependency resolution (enable, reflect, require or prefer/accept).
+ //
+ class package_skeleton
+ {
+ public:
+ // If the package is system, then its available package should be NULL if
+ // it doesn't match the system package version "close enough" to be usable
+ // as the source of its configuration information (types, defaults). If it
+ // is NULL, then the skeleton can only be used to print and collect the
+ // configuration information.
+ //
+ // If the package is being reconfigured (rather than up/downgraded), then
+ // the existing package source and output root directories (src_root and
+ // out_root) need to be specified (as absolute and normalized). Otherwise,
+ // if the package is external, then the existing package source root
+ // directory needs to be specified (as absolute and normalized). In this
+ // case, if output root is specified (as absolute and normalized; normally
+ // <config-dir>/<package-name>), then it's used as is. Otherwise, an empty
+ // skeleton directory is used as output root.
+ //
+ // If the package is neither being reconfigured nor is external, then none
+ // of the root directories should be specified.
+ //
+ // If the package is configured as source and the user and/or dependent
+ // configuration is requested to be loaded from config.build, then the
+ // existing package old source and output root directories (old_src_root
+ // and old_out_root) need to be specified (as absolute and normalized). If
+ // specified, they are used instead of package source and output root
+ // directories to load the current user and/or dependent configuration.
+ // The idea here is that during package upgrade/downgrade, we want to load
+ // the old configuration from the old version's src/out but then continue
+ // evaluating clauses using the new version's src/out.
+ //
+ // The disfigure argument should indicate whether the package is being
+ // reconfigured from scratch (--disfigure).
+ //
+ // The config_vars argument contains configuration variables specified by
+ // the user in this bpkg execution. Optional config_srcs is used to
+ // extract (from config.build or equivalent) configuration variables
+ // specified by the user in previous bpkg executions. It should be NULL if
+ // this is the first build of the package. The extracted variables are
+ // merged with config_vars and the combined result is returned by
+ // collect_config() below.
+ //
+ // @@ TODO: speaking of the "config.build or equivalent" part, the
+ // equivalent is likely to be extracted configuration (probably saved
+ // to file in tmp somewhere) that we will load with config.config.load.
+ // It doesn't seem like a good idea to pass it as part of config_vars
+ // (because sometimes we may need to omit it) so most likely it will be
+ // passed as a separate arguments (likely a file path).
+ //
+ // Note that the options, database, and config_srcs are expected to
+ // outlive this object.
+ //
+ // Note also that this creates an "unloaded" skeleton and is therefore
+ // relatively cheap.
+ //
+ package_skeleton (const common_options& co,
+ package_key,
+ bool system,
+ shared_ptr<const available_package>,
+ strings config_vars,
+ bool disfigure,
+ const vector<config_variable>* config_srcs,
+ optional<dir_path> src_root,
+ optional<dir_path> out_root,
+ optional<dir_path> old_src_root,
+ optional<dir_path> old_out_root,
+ uint16_t load_config_flags);
+
+ package_key package;
+ bool system;
+ shared_ptr<const available_package> available;
+
+ // Load package (old) configuration flags.
+ //
+ uint16_t load_config_flags;
+
+ static const uint16_t load_config_user = 0x1;
+ static const uint16_t load_config_dependent = 0x2;
+
+ // The following functions should be called in the following sequence
+ // (* -- zero or more, ? -- zero or one):
+ //
+ // * reload_defaults() | verify_sensible()
+ // ? dependent_config()
+ // * evaluate_*()
+ // * empty() | print_config()
+ // * config_checksum()
+ // collect_config()
+ //
+ // Note that the load_old_config() function can be called at any point
+ // before collect_config() (and is called implicitly by most other
+ // functions).
+ //
+ // Note that a copy of the skeleton is expected to continue with the
+ // sequence rather than starting from scratch, unless reset() is called.
+ //
+ public:
+ // Reload the default values and type information for configuration
+ // variables using the values with the buildfile origin as a "tentative"
+ // dependent configuration.
+ //
+ void
+ reload_defaults (package_configuration&);
+
+ // Load overrides for a system package without skeleton info. Note that
+ // this is done in an ad hoc manner and only to support evaluate_require()
+ // semantics (see the implementation for details).
+ //
+ void
+ load_overrides (package_configuration&);
+
+ // Verify the specified "tentative" dependent configuration is sensible,
+ // that is, acceptable to the dependency itself. If it is not, then the
+ // second half of the result contains the diagnostics.
+ //
+ pair<bool, string>
+ verify_sensible (const package_configuration&);
+
+ // Incorporate the "final" dependent configuration into subsequent
+ // evaluations. Dependent configuration variables are expected not to
+ // clash with user.
+ //
+ void
+ dependent_config (const package_configuration&);
+
+ // For the following evaluate_*() functions assume that the clause belongs
+ // to the dependency alternative specified as a pair of indexes (depends
+ // value index and alternative index).
+
+ // Evaluate the enable clause.
+ //
+ bool
+ evaluate_enable (const string&, pair<size_t, size_t>);
+
+ // Evaluate the reflect clause.
+ //
+ void
+ evaluate_reflect (const string&, pair<size_t, size_t>);
+
+ // Evaluate the prefer/accept or require clauses on the specified
+ // dependency configurations (serves as both input and output).
+ //
+ // Return true if acceptable and false otherwise. If acceptable, the
+ // passed configuration is updated with new values, if any.
+ //
+ using dependency_configurations =
+ small_vector<reference_wrapper<package_configuration>, 1>;
+
+ bool
+ evaluate_prefer_accept (const dependency_configurations&,
+ const string&, const string&, pair<size_t, size_t>,
+ bool has_alternative);
+
+ bool
+ evaluate_require (const dependency_configurations&,
+ const string&, pair<size_t, size_t>,
+ bool has_alternative);
+
+ // Reset the skeleton to the start of the call sequence.
+ //
+ // Note that this function cannot be called after collect_config().
+ //
+ void
+ reset ();
+
+ // Return true if there are no accumulated *project* configuration
+ // variables that will be printed by print_config().
+ //
+ bool
+ empty_print ();
+
+ // Print the accumulated *project* configuration variables as command line
+ // overrides one per line with the specified indentation.
+ //
+ void
+ print_config (ostream&, const char* indent);
+
+ // Load the package's old configuration, unless it is already loaded.
+ //
+ void
+ load_old_config ();
+
+ // Return the accumulated configuration variables (first) and project
+ // configuration variable sources (second). Note that the arrays are not
+ // necessarily parallel (config_vars may contain non-project variables).
+ //
+ // Note that the dependent and reflect variables are merged with
+ // config_vars/config_srcs and should be used instead rather than in
+ // addition to config_vars.
+ //
+ // Note also that this should be the final call on this object.
+ //
+ pair<strings, vector<config_variable>>
+ collect_config () &&;
+
+ // Return the checksum of the project configuration variables that will be
+ // returned by the collect_config() function call.
+ //
+ string
+ config_checksum ();
+
+ // Implementation details.
+ //
+ public:
+ // We have to define these because context is forward-declared. Also, copy
+ // constructor has some special logic.
+ //
+ ~package_skeleton ();
+ package_skeleton (package_skeleton&&) noexcept;
+ package_skeleton& operator= (package_skeleton&&) noexcept;
+
+ package_skeleton (const package_skeleton&);
+ package_skeleton& operator= (const package_skeleton&) = delete;
+
+ private:
+ // Load old user and/or dependent configuration variables from
+ // config.build (or equivalent) and merge them into config_vars_ and
+ // config_var_srcs_. Also verify new user configuration already in
+ // config_vars_ makes sense.
+ //
+ // This should be done before any attempt to load the configuration with
+ // config.config.disfigure and, if this did not happen, inside
+ // collect_config() (since the package will be reconfigured with
+ // config.config.disfigure).
+ //
+ void
+ load_old_config_impl ();
+
+ // (Re)load the build system state.
+ //
+ // Call this function before evaluating every clause.
+ //
+ // If dependency configurations are specified, then typify the variables
+ // and set their values. If defaults is false, then only typify the
+ // variables and set overrides without setting the default/buildfile
+ // values. Note that buildfile values have value::extra set to 2. While
+ // at it, also remove from dependency_var_prefixes_ and add to
+ // dependency_var_prefixes variable prefixes (config.<project>) for
+ // the passed dependencies.
+ //
+ build2::scope&
+ load (const dependency_configurations& = {},
+ strings* dependency_var_prefixes = nullptr,
+ bool defaults = true);
+
+ // Merge command line variable overrides into one list (normally to be
+ // passed to bootstrap()).
+ //
+ // If cache is true, then assume the result can be reused on subsequent
+ // calls.
+ //
+ const strings&
+ merge_cmd_vars (const strings& dependent_vars,
+ const strings& dependency_vars = {},
+ bool cache = false);
+
+ // Implementation details (public for bootstrap()).
+ //
+ public:
+ // NOTE: remember to update move/copy constructors!
+ //
+ const common_options* co_;
+ database* db_;
+
+ string var_prefix_; // config.<project>
+
+ strings config_vars_;
+
+ // Configuration sources for variables in config_vars_ (parallel). Can
+ // only contain config_source::{user,dependent} entries (see
+ // load_old_config_impl() for details).
+ //
+ vector<config_source> config_var_srcs_;
+
+ bool disfigure_;
+ const vector<config_variable>* config_srcs_; // NULL if nothing to do or
+ // already done.
+
+ dir_path src_root_; // Must be absolute and normalized.
+ dir_path out_root_; // If empty, the same as src_root_.
+
+ // True if the existing source root directory has been specified.
+ //
+ // Note that if that's the case, we can use the manifest file this
+ // directory contains for diagnostics.
+ //
+ bool src_root_specified_ = false;
+
+ // If specified, are used instead of {src,out}_root_ for loading of the
+ // project configuration variables.
+ //
+ dir_path old_src_root_;
+ dir_path old_out_root_;
+
+ bool created_ = false;
+ bool verified_ = false;
+ bool loaded_old_config_;
+ bool develop_ = true; // Package has config.*.develop.
+
+ unique_ptr<build2::context> ctx_;
+ build2::scope* rs_ = nullptr;
+
+ // Storage for merged build2_cmd_vars and config_vars_ and extra overrides
+ // (like config.config.disfigure). If cache is true, then the existing
+ // content can be reused.
+ //
+ strings cmd_vars_;
+ bool cmd_vars_cache_ = false;
+
+ strings dependent_vars_; // Dependent variable overrides.
+ vector<package_key> dependent_orgs_; // Dependent originators (parallel).
+
+ // Reflect variable value storage. Used for both real reflect and
+ // dependency reflect.
+ //
+ struct reflect_variable_value
+ {
+ string name;
+ build2::config::variable_origin origin;
+ optional<string> type;
+ optional<build2::names> value;
+ };
+
+ class reflect_variable_values: public vector<reflect_variable_value>
+ {
+ public:
+ const reflect_variable_value*
+ find (const string& name)
+ {
+ auto i (find_if (begin (), end (),
+ [&name] (const reflect_variable_value& v)
+ {
+ return v.name == name;
+ }));
+ return i != end () ? &*i : nullptr;
+ }
+ };
+
+ reflect_variable_values reflect_; // Reflect variables.
+
+ // Dependency configuration variables set by the prefer/require clauses
+ // and that should be reflected in subsequent clauses.
+ //
+ // The same prefer/require clause could be re-evaluated multiple times in
+ // which case the previous dependency reflect values from this clause (but
+ // not from any previous clauses) should be dropped. This is achieved by
+ // keeping track of the depends_index for the most recently evaluated
+ // prefer/require clause along with the position of the first element that
+ // was added by this clause. Note also that this logic does the right
+ // thing if we move to a different dependency alternative within the same
+ // depends value.
+ //
+ reflect_variable_values dependency_reflect_;
+ size_t dependency_reflect_index_ = 0;
+ size_t dependency_reflect_pending_ = 0;
+
+ // List of variable prefixes (config.<project>) of all known dependencies.
+ //
+ // This information is used to detect and diagnose references to undefined
+ // dependency configuration variables (for example, those that were not
+ // set and therefore not reflected). The pending index is used to ignore
+ // the entries added by the last evaluate_prefer_accept() in the following
+ // reflect clause (see prefer_accept_ below for details).
+ //
+ strings dependency_var_prefixes_;
+ size_t dependency_var_prefixes_pending_ = 0;
+
+ // Position of the last successfully evaluated prefer/accept clauses.
+ //
+ // This information is used to make all (as opposed to only those set by
+ // the prefer clause) dependency configuration variables available to the
+ // reflect clause but only at the same position. This allows for some more
+ // advanced configuration techniques, such as, using a feature if enabled
+ // by someone else but not having any preferences ourselves.
+ //
+ optional<pair<size_t, size_t>> prefer_accept_;
+ };
+}
+
+#endif // BPKG_PACKAGE_SKELETON_HXX
diff --git a/bpkg/package.cxx b/bpkg/package.cxx
index 3532f3d..05dbc0d 100644
--- a/bpkg/package.cxx
+++ b/bpkg/package.cxx
@@ -6,7 +6,10 @@
#include <bpkg/database.hxx>
#include <bpkg/checksum.hxx>
+#include <bpkg/rep-mask.hxx>
+#include <bpkg/pkg-verify.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/satisfaction.hxx>
#include <bpkg/manifest-utility.hxx>
using namespace std;
@@ -15,322 +18,191 @@ namespace bpkg
{
const version wildcard_version (0, "0", nullopt, nullopt, 0);
- // available_package_id
+ // configuration
//
- bool
- operator< (const available_package_id& x, const available_package_id& y)
+ configuration::
+ configuration (optional<string> n, string t, optional<uuid_type> uid)
+ : id (0),
+ name (move (n)),
+ type (move (t)),
+ expl (false)
{
- int r (x.name.compare (y.name));
- return r != 0 ? r < 0 : x.version < y.version;
+ try
+ {
+ uuid = uid ? *uid : uuid_type::generate ();
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to generate configuration uuid: " << e;
+ }
}
- // available_package
- //
- odb::result<available_package>
- query_available (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- bool order)
+ dir_path configuration::
+ effective_path (const dir_path& d) const
{
- using query = query<available_package>;
-
- query q (query::id.name == name);
- const auto& vm (query::id.version);
-
- // If there is a constraint, then translate it to the query. Otherwise,
- // get the latest version or stub versions if present.
- //
- if (c)
+ if (path.relative ())
{
- assert (c->complete ());
-
- // If the revision is not explicitly specified, then compare ignoring the
- // revision. The idea is that when the user runs 'bpkg build libfoo/1'
- // and there is 1+1 available, it should just work. The user shouldn't
- // have to spell the revision explicitly. Similarly, when we have
- // 'depends: libfoo == 1', then it would be strange if 1+1 did not
- // satisfy this constraint. The same for libfoo <= 1 -- 1+1 should
- // satisfy.
- //
- // Note that we always compare ignoring the iteration, as it can not be
- // specified in the manifest/command line. This way the latest iteration
- // will always be picked up.
- //
- query qs (compare_version_eq (vm,
- canonical_version (wildcard_version),
- false /* revision */,
- false /* iteration */));
-
- if (c->min_version &&
- c->max_version &&
- *c->min_version == *c->max_version)
- {
- const version& v (*c->min_version);
-
- q = q &&
- (compare_version_eq (vm,
- canonical_version (v),
- v.revision.has_value (),
- false /* iteration */) ||
- qs);
- }
- else
- {
- query qr (true);
-
- if (c->min_version)
- {
- const version& v (*c->min_version);
- canonical_version cv (v);
- bool rv (v.revision);
-
- if (c->min_open)
- qr = compare_version_gt (vm, cv, rv, false /* iteration */);
- else
- qr = compare_version_ge (vm, cv, rv, false /* iteration */);
- }
+ dir_path r (d / path);
- if (c->max_version)
- {
- const version& v (*c->max_version);
- canonical_version cv (v);
- bool rv (v.revision);
-
- if (c->max_open)
- qr = qr && compare_version_lt (vm, cv, rv, false /* iteration */);
- else
- qr = qr && compare_version_le (vm, cv, rv, false /* iteration */);
- }
+ string what ("linked with " + d.representation () + " configuration " +
+ (name ? *name : to_string (*id)));
- q = q && (qr || qs);
- }
+ normalize (r, what.c_str ());
+ return r;
}
-
- if (order)
- q += order_by_version_desc (vm);
-
- return db.query<available_package> (q);
+ else
+ return path;
}
- // Check if the package is available from the specified repository fragment,
- // its prerequisite repositories, or one of their complements, recursively.
- // Return the first repository fragment that contains the package or NULL if
- // none are.
+ // package_key
//
- // Note that we can end up with a repository dependency cycle since the
- // root repository can be the default complement for dir and git
- // repositories (see rep_fetch() implementation for details). Thus we need
- // to make sure that the repository fragment is not in the dependency chain
- // yet.
- //
- using repository_fragments =
- vector<reference_wrapper<const shared_ptr<repository_fragment>>>;
-
- static shared_ptr<repository_fragment>
- find (const shared_ptr<repository_fragment>& rf,
- const shared_ptr<available_package>& ap,
- repository_fragments& chain,
- bool prereq)
+ string package_key::
+ string () const
{
- // Prerequisites are not searched through recursively.
- //
- assert (!prereq || chain.empty ());
-
- auto i (find_if (chain.begin (), chain.end (),
- [&rf] (const shared_ptr<repository_fragment>& i) -> bool
- {
- return i == rf;
- }));
-
- if (i != chain.end ())
- return nullptr;
-
- chain.emplace_back (rf);
+ const std::string& s (db.get ().string);
+ return !s.empty () ? name.string () + ' ' + s : name.string ();
+ }
- unique_ptr<repository_fragments, void (*)(repository_fragments*)> deleter (
- &chain, [] (repository_fragments* rf) {rf->pop_back ();});
+ bool package_key::
+ operator< (const package_key& v) const
+ {
+ int r (name.compare (v.name));
+ return r != 0 ? (r < 0) : (db < v.db);
+ }
- const auto& cs (rf->complements);
- const auto& ps (rf->prerequisites);
+ // package_version_key
+ //
+ string package_version_key::
+ string (bool ignore_version) const
+ {
+ std::string r (name.string ());
- for (const package_location& pl: ap->locations)
+ if (version && !version->empty () && !ignore_version)
{
- const lazy_shared_ptr<repository_fragment>& lrf (pl.repository_fragment);
-
- // First check the repository itself.
- //
- if (lrf.object_id () == rf->name)
- return rf;
-
- // Then check all the complements and prerequisites repository fragments
- // without loading them. Though, we still need to load complement and
- // prerequisite repositories.
- //
- auto pr = [&lrf] (const repository::fragment_type& i)
- {
- return i.fragment == lrf;
- };
-
- for (const lazy_weak_ptr<repository>& r: cs)
- {
- const auto& frs (r.load ()->fragments);
-
- if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
- return lrf.load ();
- }
-
- if (prereq)
- {
- for (const lazy_weak_ptr<repository>& r: ps)
- {
- const auto& frs (r.load ()->fragments);
-
- if (find_if (frs.begin (), frs.end (), pr) != frs.end ())
- return lrf.load ();
- }
- }
+ r += '/';
+ r += version->string ();
+ }
- // Finally, load the complements and prerequisites and check them
- // recursively.
- //
- for (const lazy_weak_ptr<repository>& cr: cs)
- {
- for (const auto& fr: cr.load ()->fragments)
- {
- // Should we consider prerequisites of our complements as our
- // prerequisites? I'd say not.
- //
- if (shared_ptr<repository_fragment> r =
- find (fr.fragment.load (), ap, chain, false))
- return r;
- }
- }
+ const std::string& d (db.get ().string);
- if (prereq)
- {
- for (const lazy_weak_ptr<repository>& pr: ps)
- {
- for (const auto& fr: pr.load ()->fragments)
- {
- if (shared_ptr<repository_fragment> r =
- find (fr.fragment.load (), ap, chain, false))
- return r;
- }
- }
- }
+ if (!d.empty ())
+ {
+ r += ' ';
+ r += d;
}
- return nullptr;
+ return r;
}
- shared_ptr<repository_fragment>
- filter (const shared_ptr<repository_fragment>& r,
- const shared_ptr<available_package>& ap,
- bool prereq)
+ bool package_version_key::
+ operator< (const package_version_key& v) const
{
- repository_fragments chain;
- return find (r, ap, chain, prereq);
+ // NOTE: remember to update cmdline_adjustments::tried_earlier() if
+ // changing anything here.
+ //
+ if (int r = name.compare (v.name))
+ return r < 0;
+
+ return version != v.version ? (version < v.version) : (db < v.db);
}
- vector<shared_ptr<available_package>>
- filter (const shared_ptr<repository_fragment>& r,
- result<available_package>&& apr,
- bool prereq)
+ // available_package
+ //
+ const version* available_package::
+ system_version (database& db) const
{
- vector<shared_ptr<available_package>> aps;
-
- for (shared_ptr<available_package> ap: pointer_result (apr))
+ if (!system_version_)
{
- if (filter (r, ap, prereq) != nullptr)
- aps.push_back (move (ap));
+ assert (db.system_repository);
+
+ if (const system_package* sp = db.system_repository->find (id.name))
+ {
+ // Only cache if it is authoritative.
+ //
+ if (sp->authoritative)
+ system_version_ = sp->version;
+ else
+ return &sp->version;
+ }
}
- return aps;
+ return system_version_ ? &*system_version_ : nullptr;
}
- pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- filter_one (const shared_ptr<repository_fragment>& r,
- result<available_package>&& apr,
- bool prereq)
+ pair<const version*, bool> available_package::
+ system_version_authoritative (database& db) const
{
- using result = pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>;
+ assert (db.system_repository);
+
+ const system_package* sp (db.system_repository->find (id.name));
- for (shared_ptr<available_package> ap: pointer_result (apr))
+ if (!system_version_)
{
- if (shared_ptr<repository_fragment> pr = filter (r, ap, prereq))
- return result (move (ap), move (pr));
+ if (sp != nullptr)
+ {
+ // Only cache if it is authoritative.
+ //
+ if (sp->authoritative)
+ system_version_ = sp->version;
+ else
+ return make_pair (&sp->version, false);
+ }
}
- return result ();
+ return make_pair (system_version_ ? &*system_version_ : nullptr,
+ sp != nullptr ? sp->authoritative : false);
}
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- filter (const vector<shared_ptr<repository_fragment>>& rps,
- odb::result<available_package>&& apr,
- bool prereq)
+ void
+ check_any_available (const linked_databases& dbs,
+ transaction&,
+ const diag_record* drp)
{
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> aps;
-
- for (shared_ptr<available_package> ap: pointer_result (apr))
+ bool rep (false);
+ bool pkg (false);
+ for (database& db: dbs)
{
- for (const shared_ptr<repository_fragment>& r: rps)
+ if (db.query_value<repository_count> () != 0)
{
- if (shared_ptr<repository_fragment> rf = filter (r, ap, prereq))
+ rep = true;
+
+ if (db.query_value<available_package_count> () != 0)
{
- aps.emplace_back (move (ap), move (rf));
+ pkg = true;
break;
}
}
}
- return aps;
- }
-
- pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- filter_one (const vector<shared_ptr<repository_fragment>>& rps,
- odb::result<available_package>&& apr,
- bool prereq)
- {
- using result = pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>;
+ if (pkg)
+ return;
- for (shared_ptr<available_package> ap: pointer_result (apr))
- {
- for (const shared_ptr<repository_fragment>& r: rps)
- {
- if (shared_ptr<repository_fragment> rf = filter (r, ap, prereq))
- return result (move (ap), move (rf));
- }
- }
+ diag_record d;
+ const diag_record& dr (drp != nullptr ? *drp << info : d << fail);
- return result ();
- }
-
- void
- check_any_available (const dir_path& c,
- transaction& t,
- const diag_record* dr)
- {
- database& db (t.database ());
+ if (dbs.size () == 1)
+ dr << "configuration " << dbs[0].get ().config_orig << " has ";
+ else
+ dr << "specified configurations have ";
- if (db.query_value<repository_count> () == 0)
+ if (!rep)
{
- diag_record d;
- (dr != nullptr ? *dr << info : d << fail)
- << "configuration " << c << " has no repositories" <<
+ dr << "no repositories" <<
info << "use 'bpkg rep-add' to add a repository";
}
- else if (db.query_value<available_package_count> () == 0)
+ else
{
- diag_record d;
- (dr != nullptr ? *dr << info : d << fail)
- << "configuration " << c << " has no available packages" <<
+ dr << "no available packages" <<
info << "use 'bpkg rep-fetch' to fetch available packages list";
}
}
+ void
+ check_any_available (database& db, transaction& t, const diag_record* dr)
+ {
+ return check_any_available (linked_databases ({db}), t, dr);
+ }
+
string
package_string (const package_name& n, const version& v, bool system)
{
@@ -376,29 +248,157 @@ namespace bpkg
// Quote the result as it contains the space character.
//
- return "'" + name.string () + ' ' + constraint->string () + "'";
+ return '\'' + name.string () + ' ' + constraint->string () + '\'';
}
// selected_package
//
string selected_package::
- version_string () const
+ string (database& db) const
{
- return version != wildcard_version ? version.string () : "*";
+ const std::string& s (db.string);
+ return !s.empty () ? string () + ' ' + s : string ();
+ }
+
+ _selected_package_ref::
+ _selected_package_ref (const lazy_shared_ptr<selected_package>& p)
+ : configuration (p.database ().uuid),
+ prerequisite (p.object_id ())
+ {
+ }
+
+ lazy_shared_ptr<selected_package> _selected_package_ref::
+ to_ptr (odb::database& db) &&
+ {
+ database& pdb (static_cast<database&> (db));
+
+ // Note that if this points to a different configuration, then it should
+ // already be pre-attached since it must be explicitly linked.
+ //
+ database& ddb (pdb.find_dependency_config (configuration));
+
+ // Make sure the prerequisite exists in the explicitly linked
+ // configuration, so that a subsequent load() call will not fail. This,
+  // for example, can happen in an unlikely but possible situation when the
+ // implicitly linked configuration containing a dependent was temporarily
+ // renamed before its prerequisite was dropped.
+ //
+ // Note that the diagnostics lacks information about the dependent and its
+ // configuration. However, handling this situation at all the load()
+ // function call sites where this information is available, for example by
+ // catching the odb::object_not_persistent exception, feels a bit
+ // hairy. Given the situation is not common, let's keep it simple for now
+ // and see how it goes.
+ //
+ if (ddb != pdb && ddb.find<selected_package> (prerequisite) == nullptr)
+ fail << "unable to find prerequisite package " << prerequisite
+ << " in linked configuration " << ddb.config_orig;
+
+ return lazy_shared_ptr<selected_package> (ddb, move (prerequisite));
+ }
+
+ string
+ to_string (config_source s)
+ {
+ switch (s)
+ {
+ case config_source::user: return "user";
+ case config_source::dependent: return "dependent";
+ case config_source::reflect: return "reflect";
+ }
+
+ return string (); // Should never reach.
+ }
+
+ config_source
+ to_config_source (const string& s)
+ {
+ if (s == "user") return config_source::user;
+ else if (s == "dependent") return config_source::dependent;
+ else if (s == "reflect") return config_source::reflect;
+ else throw invalid_argument ("invalid config source '" + s + '\'');
+ }
+
+ shared_ptr<available_package>
+ make_available (const common_options& options,
+ database& db,
+ const shared_ptr<selected_package>& sp)
+ {
+ assert (sp != nullptr && sp->state != package_state::broken);
+
+ if (sp->system ())
+ return make_shared<available_package> (sp->name, sp->version);
+
+ // The package is in at least fetched state, which means we should
+ // be able to get its manifest.
+ //
+ // @@ PERF We should probably implement the available package caching not
+ // to parse the same manifests multiple times during all that build
+ // plan refinement iterations. What should be the cache key? Feels like
+ // it should be the archive/directory path. Note that the package
+ // manifests can potentially differ in different external package
+ // directories for the same version iteration. Testing showed 6%
+ // speedup on tests (debug/sanitized).
+ //
+ package_manifest m (
+ sp->state == package_state::fetched
+ ? pkg_verify (options,
+ sp->effective_archive (db.config_orig),
+ true /* ignore_unknown */,
+ false /* ignore_toolchain */,
+ false /* expand_values */,
+ true /* load_buildfiles */)
+ : pkg_verify (options,
+ sp->effective_src_root (db.config_orig),
+ true /* ignore_unknown */,
+ false /* ignore_toolchain */,
+ true /* load_buildfiles */,
+ // Copy potentially fixed up version from selected package.
+ [&sp] (version& v) {v = sp->version;}));
+
+ return make_shared<available_package> (move (m));
+ }
+
+ pair<shared_ptr<selected_package>, database*>
+ find_dependency (database& db, const package_name& pn, bool buildtime)
+ {
+ pair<shared_ptr<selected_package>, database*> r;
+
+ for (database& ldb: db.dependency_configs (pn, buildtime))
+ {
+ shared_ptr<selected_package> p (ldb.find<selected_package> (pn));
+
+ if (p != nullptr)
+ {
+ if (r.first == nullptr)
+ {
+ r.first = move (p);
+ r.second = &ldb;
+ }
+ else
+ {
+ fail << "package " << pn << " appears in multiple configurations" <<
+ info << r.first->state << " in " << r.second->config_orig <<
+ info << p->state << " in " << ldb.config_orig;
+ }
+ }
+ }
+
+ return r;
}
optional<version>
package_iteration (const common_options& o,
- const dir_path& c,
- transaction& t,
+ database& db,
+ transaction&,
const dir_path& d,
const package_name& n,
const version& v,
+ const package_info* pi,
bool check_external)
{
tracer trace ("package_iteration");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (check_external)
@@ -416,7 +416,7 @@ namespace bpkg
{
const shared_ptr<repository_fragment>& rf (prf.repository_fragment);
- if (rf->location.directory_based ())
+ if (!rep_masked_fragment (db, rf) && rf->location.directory_based ())
fail << "external package " << n << '/' << v
<< " is already available from "
<< rf->location.canonical_name ();
@@ -432,30 +432,54 @@ namespace bpkg
false /* iteration */))
return nullopt;
- string mc (sha256 (o, d / manifest_file));
+ bool changed (!p->external ());
- // The selected package must not be "simulated" (see pkg-build for
- // details).
+ // If the selected package is not external, then increment the iteration
+ // number to make the external package preferable. Note that for such
+ // packages the manifest/subprojects and buildfiles checksums are absent.
//
- assert (p->manifest_checksum);
+ if (!changed)
+ {
+ // The selected package must not be "simulated" (see pkg-build for
+ // details).
+ //
+ assert (p->manifest_checksum);
- bool changed (mc != *p->manifest_checksum);
+ changed = (package_checksum (o, d, pi) != *p->manifest_checksum);
- // If the manifest didn't changed but the selected package points to an
- // external source directory, then we also check if the directory have
- // moved.
- //
- if (!changed && p->external ())
- {
- dir_path src_root (p->effective_src_root (c));
+ // If the manifest hasn't changed and the package has buildfile clauses
+ // in the dependencies, then check if the buildfiles haven't changed
+ // either.
+ //
+ if (!changed && p->buildfiles_checksum)
+ {
+ // Always calculate the checksum over the buildfiles since the package
+ // is external.
+ //
+ changed = package_buildfiles_checksum (
+ nullopt /* bootstrap_build */,
+ nullopt /* root_build */,
+ {} /* buildfiles */,
+ d) != *p->buildfiles_checksum;
+ }
- // We need to complete and normalize the source directory as it may
- // generally be completed against the configuration directory (unlikely
- // but possible), that can be relative and/or not normalized.
+ // If the manifest hasn't changed but the selected package points to an
+    // external source directory, then we also check if the directory has
+ // moved.
//
- normalize (src_root, "package source");
+ if (!changed)
+ {
+ dir_path src_root (p->effective_src_root (db.config));
+
+ // We need to complete and normalize the source directory as it may
+ // generally be completed against the configuration directory
+ // (unlikely but possible), that can be relative and/or not
+ // normalized.
+ //
+ normalize (src_root, "package source");
- changed = src_root != normalize (d, "package source");
+ changed = src_root != normalize (d, "package source");
+ }
}
return !changed
@@ -492,7 +516,7 @@ namespace bpkg
else if (s == "fetched") return package_state::fetched;
else if (s == "unpacked") return package_state::unpacked;
else if (s == "configured") return package_state::configured;
- else throw invalid_argument ("invalid package state '" + s + "'");
+ else throw invalid_argument ("invalid package state '" + s + '\'');
}
// substate
@@ -514,7 +538,7 @@ namespace bpkg
{
if (s == "none") return package_substate::none;
else if (s == "system") return package_substate::system;
- else throw invalid_argument ("invalid package substate '" + s + "'");
+ else throw invalid_argument ("invalid package substate '" + s + '\'');
}
// certificate
@@ -532,4 +556,113 @@ namespace bpkg
return os;
}
+
+ // package_dependent
+ //
+ odb::result<package_dependent>
+ query_dependents (database& db,
+ const package_name& dep,
+ database& dep_db)
+ {
+ // Prepare and cache this query since it's executed a lot. Note that we
+ // have to cache one per database.
+ //
+ using query = query<package_dependent>;
+ using prep_query = prepared_query<package_dependent>;
+
+ struct params
+ {
+ string name;
+ string config; // Configuration UUID.
+ string query_name;
+ };
+
+ params* qp;
+ string qn (db.uuid.string () + "-package-dependent-query");
+ prep_query pq (db.lookup_query<package_dependent> (qn.c_str (), qp));
+
+ if (!pq)
+ {
+ unique_ptr<params> p (qp = new params ());
+ p->query_name = move (qn);
+
+ query q ("prerequisite = " + query::_ref (p->name) + "AND" +
+ "configuration = " + query::_ref (p->config));
+
+ pq = db.prepare_query<package_dependent> (p->query_name.c_str (), q);
+ db.cache_query (pq, move (p));
+ }
+
+ qp->name = dep.string ();
+ qp->config = dep_db.uuid.string ();
+
+ return pq.execute ();
+ }
+
+ vector<package_dependent>
+ query_dependents_cache (database& db,
+ const package_name& dep,
+ database& dep_db)
+ {
+ vector<package_dependent> r;
+ for (package_dependent& pd: query_dependents (db, dep, dep_db))
+ r.push_back (move (pd));
+ return r;
+ }
+
+ bool
+ toolchain_buildtime_dependency (const common_options& o,
+ const dependency_alternatives& das,
+ const package_name* pkg)
+ {
+ if (das.buildtime)
+ {
+ for (const dependency_alternative& da: das)
+ {
+ for (const dependency& d: da)
+ {
+ const package_name& dn (d.name);
+
+ if (dn == "build2")
+ {
+ if (pkg != nullptr && d.constraint && !satisfy_build2 (o, d))
+ {
+ fail << "unable to satisfy constraint (" << d << ") for "
+ << "package " << *pkg <<
+ info << "available build2 version is " << build2_version;
+ }
+
+ return true;
+ }
+ else if (dn == "bpkg")
+ {
+ if (pkg != nullptr && d.constraint && !satisfy_bpkg (o, d))
+ {
+ fail << "unable to satisfy constraint (" << d << ") for "
+ << "package " << *pkg <<
+ info << "available bpkg version is " << bpkg_version;
+ }
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ bool
+ has_dependencies (const common_options& o,
+ const dependencies& deps,
+ const package_name* pkg)
+ {
+ for (const auto& das: deps)
+ {
+ if (!toolchain_buildtime_dependency (o, das, pkg))
+ return true;
+ }
+
+ return false;
+ }
}
diff --git a/bpkg/package.hxx b/bpkg/package.hxx
index cee2dd6..400519a 100644
--- a/bpkg/package.hxx
+++ b/bpkg/package.hxx
@@ -11,39 +11,33 @@
#include <type_traits> // static_assert
#include <odb/core.hxx>
+#include <odb/section.hxx>
#include <odb/nested-container.hxx>
-#include <libbutl/timestamp.mxx>
+#include <libbutl/timestamp.hxx>
#include <libbpkg/package-name.hxx>
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // transaction
+#include <bpkg/forward.hxx> // database, linked_databases, transaction
#include <bpkg/utility.hxx>
#include <bpkg/diagnostics.hxx>
// Used by the data migration entries.
//
-#define DB_SCHEMA_VERSION_BASE 6
+// NOTE: drop all the `#pragma db member(...) default(...)` pragmas when
+// migration is no longer supported (i.e., the current and base schema
+// versions are the same).
+//
+#define DB_SCHEMA_VERSION_BASE 12
-#pragma db model version(DB_SCHEMA_VERSION_BASE, 8, closed)
+#pragma db model version(DB_SCHEMA_VERSION_BASE, 26, closed)
namespace bpkg
{
- // Compare two lazy pointers via the pointed-to object ids.
- //
- struct compare_lazy_ptr
- {
- template <typename P>
- bool
- operator() (const P& x, const P& y) const
- {
- return x.object_id () < y.object_id ();
- }
- };
-
using optional_string = optional<string>;
+ using optional_uint64_t = optional<uint64_t>; // Preserve uint64_t alias.
// path
//
@@ -67,6 +61,10 @@ namespace bpkg
to((?) ? (?)->string () : bpkg::optional_string ()) \
from((?) ? bpkg::dir_path (*(?)) : bpkg::optional_dir_path ())
+ // uuid
+ //
+ #pragma db map type(uuid) as(string) to((?).string ()) from(bpkg::uuid (?))
+
// timestamp
//
using butl::timestamp;
@@ -80,7 +78,7 @@ namespace bpkg
std::chrono::nanoseconds::period>::value,
"The following timestamp ODB mapping is invalid");
- // As pointed out in libbutl/timestamp.mxx we will overflow in year 2262, but
+ // As pointed out in libbutl/timestamp.hxx we will overflow in year 2262, but
// by that time some larger basic type will be available for mapping.
//
#pragma db map type(timestamp) as(uint64_t) \
@@ -106,7 +104,7 @@ namespace bpkg
string upstream;
optional<string> release;
- // @@ TMP: work around MSVC 16.2 bug.
+ // Work around MSVC 16.2 bug.
//
_version () = default;
_version (uint16_t e,
@@ -122,8 +120,6 @@ namespace bpkg
#include <libbpkg/manifest.hxx>
-#include <bpkg/system-repository.hxx>
-
// Prevent assert() macro expansion in get/set expressions. This should
// appear after all #include directives since the assert() macro is
// redefined in each <assert.h> inclusion.
@@ -136,6 +132,97 @@ void assert (int);
namespace bpkg
{
+ // Linked bpkg configuration.
+ //
+ // Link with id 0 is the special self-link which captures information about
+ // the current configuration. This information is cached in links of other
+ // configurations.
+ //
+ // Note that linked configurations information will normally be accessed
+ // through the database object functions, which load and cache this
+ // information on the first call. This makes the session support for the
+ // configuration class redundant. Moreover, with the session support
+ // disabled the database implementation can freely move out the data from
+ // the configuration objects into the internal cache and safely load them
+ // from the temporary database objects (see database::attach() for details).
+ //
+ #pragma db object pointer(shared_ptr)
+ class configuration
+ {
+ public:
+ using uuid_type = bpkg::uuid;
+
+ // Link id.
+ //
+ // Zero for the self-link and is auto-assigned for linked configurations
+ // when the object is persisted.
+ //
+ optional_uint64_t id; // Object id.
+
+ uuid_type uuid;
+ optional<string> name;
+ string type;
+ dir_path path; // Empty for the self-link.
+
+ // True if the link is created explicitly by the user rather than
+ // automatically as a backlink.
+ //
+ bool expl;
+
+ // Database mapping.
+ //
+ #pragma db member(id) id auto
+ #pragma db member(uuid) unique
+ #pragma db member(name) unique
+ #pragma db member(path) unique
+ #pragma db member(expl) column("explicit")
+
+ public:
+ // Create the self-link. Generate the UUID, unless specified.
+ //
+ configuration (optional<string> n,
+ string t,
+ optional<uuid_type> uid = nullopt);
+
+ // Create a linked configuration.
+ //
+ configuration (const uuid_type& uid,
+ optional<string> n,
+ string t,
+ dir_path p,
+ bool e)
+ : uuid (uid),
+ name (move (n)),
+ type (move (t)),
+ path (move (p)),
+ expl (e) {}
+
+ // If the configuration path is absolute, then return it as is. Otherwise,
+ // return it completed relative to the specified linked configuration
+ // directory path and then normalized. The specified directory path should
+ // be absolute and normalized. Issue diagnostics and fail on the path
+ // conversion error.
+ //
+ // Note that the self-link object is naturally supported by this function,
+ // since its path is empty.
+ //
+ dir_path
+ effective_path (const dir_path&) const;
+
+ const dir_path&
+ make_effective_path (const dir_path& d)
+ {
+ if (path.relative ())
+ path = effective_path (d);
+
+ return path;
+ }
+
+ private:
+ friend class odb::access;
+ configuration () = default;
+ };
+
// version
//
// Sometimes we need to split the version into two parts: the part
@@ -261,7 +348,7 @@ namespace bpkg
repository_url url;
repository_type type;
- // @@ TMP: work around MSVC 16.2 bug.
+ // Work around MSVC 16.2 bug.
//
_repository_location () = default;
_repository_location (repository_url u, repository_type t)
@@ -328,7 +415,8 @@ namespace bpkg
//
// Also note that these point to repositories, not repository fragments.
//
- using dependencies = std::set<lazy_weak_ptr<repository>, compare_lazy_ptr>;
+ using dependencies = std::set<lazy_weak_ptr<repository>,
+ compare_lazy_ptr_id>;
dependencies complements;
dependencies prerequisites;
@@ -430,6 +518,10 @@ namespace bpkg
operator size_t () const {return result;}
};
+ // language
+ //
+ #pragma db value(language) definition
+
// package_location
//
#pragma db value
@@ -452,6 +544,7 @@ namespace bpkg
#pragma db value(version_constraint) definition
#pragma db value(dependency) definition
#pragma db member(dependency::constraint) column("")
+ #pragma db value(dependency_alternative) definition
#pragma db value(dependency_alternatives) definition
// Extend dependency_alternatives to also represent the special test
@@ -472,12 +565,15 @@ namespace bpkg
dependency_alternatives_ex (dependency_alternatives da)
: dependency_alternatives (move (da)) {}
+ // As above but built incrementally.
+ //
+ dependency_alternatives_ex (bool b, std::string c)
+ : dependency_alternatives (b, move (c)) {}
+
// Create the special test dependencies object (built incrementally).
//
- dependency_alternatives_ex (test_dependency_type t)
- : dependency_alternatives (false /* conditional */,
- false /* buildtime */,
- "" /* comment */),
+ dependency_alternatives_ex (test_dependency_type t, bool buildtime)
+ : dependency_alternatives (buildtime, "" /* comment */),
type (t) {}
};
@@ -493,10 +589,41 @@ namespace bpkg
make_move_iterator (das.end ()));
}
+ // Return true if this is a toolchain build-time dependency. If the package
+ // argument is specified and this is a toolchain build-time dependency then
+ // also verify its constraint and fail if it is unsatisfied. Note that the
+ // package argument is used for diagnostics only.
+ //
+ class common_options;
+
+ bool
+ toolchain_buildtime_dependency (const common_options&,
+ const dependency_alternatives&,
+ const package_name*);
+
+ // Return true if any dependency other than toolchain build-time
+ // dependencies is specified. Optionally, verify toolchain build-time
+ // dependencies specifying the package argument which will be used for
+ // diagnostics only.
+ //
+ bool
+ has_dependencies (const common_options&,
+ const dependencies&,
+ const package_name* = nullptr);
+
+ // Return true if some clause that is a buildfile fragment is specified for
+ // any of the dependencies.
+ //
+ template <typename T>
+ bool
+ has_buildfile_clause (const vector<T>& dependencies);
+
// tests
//
#pragma db value(test_dependency) definition
+ #pragma db member(test_dependency::buildtime) default(false)
+
using optional_test_dependency_type = optional<test_dependency_type>;
#pragma db map type(test_dependency_type) as(string) \
@@ -516,6 +643,19 @@ namespace bpkg
//
extern const version wildcard_version;
+ // Return true if the version constraint represents the wildcard version.
+ //
+ inline bool
+ wildcard (const version_constraint& vc)
+ {
+ bool r (vc.min_version && *vc.min_version == wildcard_version);
+
+ if (r)
+ assert (vc.max_version == vc.min_version);
+
+ return r;
+ }
+
// package_name
//
#pragma db value(package_name) type("TEXT") options("COLLATE NOCASE")
@@ -532,17 +672,31 @@ namespace bpkg
available_package_id (package_name, const bpkg::version&);
};
- bool
- operator< (const available_package_id&, const available_package_id&);
+ // buildfile
+ //
+ #pragma db value(buildfile) definition
+
+ // distribution_name_value
+ //
+ #pragma db value(distribution_name_value) definition
#pragma db object pointer(shared_ptr) session
class available_package
{
public:
using version_type = bpkg::version;
+ using upstream_version_type = bpkg::upstream_version;
available_package_id id;
- upstream_version version;
+ upstream_version_type version;
+
+ optional<string> upstream_version;
+ optional<string> type;
+
+ small_vector<language, 1> languages;
+ odb::section languages_section;
+
+ optional<package_name> project;
// List of repository fragments to which this package version belongs
// (yes, in our world, it can be in multiple, unrelated repositories)
@@ -562,8 +716,17 @@ namespace bpkg
// Package manifest data and, potentially, the special test dependencies.
//
- // Note that there can be only one special test dependencies entry in the
- // list and it's always the last one, if present.
+ // Note that there can only be one special test dependencies entry in the
+ // list. It can only be present for a test package and specifies all the
+ // main packages as the alternative dependencies. If present, it is
+ // located right after the last explicit depends clause which specifies a
+ // main package for this test package, if such a clause is present, and as
+ // the first entry otherwise. The idea here is to inject the special
+ // depends clause as early as possible, so that the other clauses could
+ // potentially refer to the reflection variables it may set. But not too
+ // early, so that the explicit main package dependencies are already
+ // resolved by the time of resolving the special clause to avoid the
+ // 'unable to select dependency alternative' error.
//
using dependencies_type = bpkg::dependencies;
@@ -571,6 +734,18 @@ namespace bpkg
small_vector<test_dependency, 1> tests;
+ // Note that while the bootstrap buildfile is always present for stub
+ // packages, we don't save buildfiles for stubs of any kind (can come from
+ // repository, be based on system selected package, etc), leaving *_build
+ // as nullopt and buildfiles empty.
+ //
+ optional<bool> alt_naming;
+ optional<string> bootstrap_build;
+ optional<string> root_build;
+ vector<buildfile> buildfiles;
+
+ vector<distribution_name_value> distribution_values;
+
// Present for non-transient objects only (and only for certain repository
// types).
//
@@ -581,14 +756,31 @@ namespace bpkg
mutable optional<version_type> system_version_;
public:
- // Note: version constraints must be complete.
+ // Note: version constraints must be complete and the bootstrap build must
+ // be present, unless this is a stub.
//
available_package (package_manifest&& m)
: id (move (m.name), m.version),
version (move (m.version)),
+ upstream_version (move (m.upstream_version)),
+ type (move (m.type)),
+ languages (move (m.languages)),
+ project (move (m.project)),
dependencies (convert (move (m.dependencies))),
tests (move (m.tests)),
- sha256sum (move (m.sha256sum)) {}
+ distribution_values (move (m.distribution_values)),
+ sha256sum (move (m.sha256sum))
+ {
+ if (!stub ())
+ {
+ assert (m.bootstrap_build.has_value () && m.alt_naming.has_value ());
+
+ alt_naming = m.alt_naming;
+ bootstrap_build = move (m.bootstrap_build);
+ root_build = move (m.root_build);
+ buildfiles = move (m.buildfiles);
+ }
+ }
// Create available stub package.
//
@@ -608,88 +800,139 @@ namespace bpkg
bool
stub () const {return version.compare (wildcard_version, true) == 0;}
+ string
+ effective_type () const
+ {
+ return package_manifest::effective_type (type, id.name);
+ }
+
+ small_vector<language, 1>
+ effective_languages () const
+ {
+ return package_manifest::effective_languages (languages, id.name);
+ }
+
// Return package system version if one has been discovered. Note that
// we do not implicitly assume a wildcard version.
//
const version_type*
- system_version () const
- {
- if (!system_version_)
- {
- if (const system_package* sp = system_repository.find (id.name))
- {
- // Only cache if it is authoritative.
- //
- if (sp->authoritative)
- system_version_ = sp->version;
- else
- return &sp->version;
- }
- }
-
- return system_version_ ? &*system_version_ : nullptr;
- }
+ system_version (database&) const;
// As above but also return an indication if the version information is
// authoritative.
//
pair<const version_type*, bool>
- system_version_authoritative () const
- {
- const system_package* sp (system_repository.find (id.name));
-
- if (!system_version_)
- {
- if (sp != nullptr)
- {
- // Only cache if it is authoritative.
- //
- if (sp->authoritative)
- system_version_ = sp->version;
- else
- return make_pair (&sp->version, false);
- }
- }
-
- return make_pair (system_version_ ? &*system_version_ : nullptr,
- sp != nullptr ? sp->authoritative : false);
- }
+ system_version_authoritative (database&) const;
// Database mapping.
//
#pragma db member(id) id column("")
#pragma db member(version) set(this.version.init (this.id.version, (?)))
+
+ // languages
+ //
+ #pragma db member(languages) id_column("") value_column("language_") \
+ section(languages_section)
+
+ #pragma db member(languages_section) load(lazy) update(always)
+
+ // locations
+ //
#pragma db member(locations) id_column("") value_column("") \
unordered value_not_null
// dependencies
//
- using _dependency_key = odb::nested_key<dependency_alternatives_ex>;
- using _dependency_alternatives_ex_type =
- std::map<_dependency_key, dependency>;
-
- #pragma db value(_dependency_key)
- #pragma db member(_dependency_key::outer) column("dependency_index")
- #pragma db member(_dependency_key::inner) column("index")
+ // Note that this is a 2-level nested container which is mapped to three
+ // container tables, one containing the data of each dimension.
+ // Container of the dependency_alternatives_ex values.
+ //
#pragma db member(dependencies) id_column("") value_column("")
- #pragma db member(dependency_alternatives_ex) \
- table("available_package_dependency_alternatives") \
- virtual(_dependency_alternatives_ex_type) \
+
+ // Container of the dependency_alternative values.
+ //
+ using _dependency_alternative_key =
+ odb::nested_key<dependency_alternatives_ex>;
+
+ using _dependency_alternatives_type =
+ std::map<_dependency_alternative_key, dependency_alternative>;
+
+ #pragma db value(_dependency_alternative_key)
+ #pragma db member(_dependency_alternative_key::outer) column("dependency_index")
+ #pragma db member(_dependency_alternative_key::inner) column("index")
+
+ #pragma db member(dependency_alternatives) \
+ virtual(_dependency_alternatives_type) \
after(dependencies) \
get(odb::nested_get (this.dependencies)) \
set(odb::nested_set (this.dependencies, std::move (?))) \
+ id_column("") key_column("") value_column("")
+
+ // Container of the dependency values.
+ //
+ using _dependency_key = odb::nested2_key<dependency_alternatives_ex>;
+ using _dependency_alternative_dependencies_type =
+ std::map<_dependency_key, dependency>;
+
+ #pragma db value(_dependency_key)
+ #pragma db member(_dependency_key::outer) column("dependency_index")
+ #pragma db member(_dependency_key::middle) column("alternative_index")
+ #pragma db member(_dependency_key::inner) column("index")
+
+ #pragma db member(dependency_alternative_dependencies) \
+ virtual(_dependency_alternative_dependencies_type) \
+ after(dependency_alternatives) \
+ get(odb::nested2_get (this.dependencies)) \
+ set(odb::nested2_set (this.dependencies, std::move (?))) \
id_column("") key_column("") value_column("dep_")
// tests
//
#pragma db member(tests) id_column("") value_column("test_")
+ // distribution_values
+ //
+ #pragma db member(distribution_values) id_column("") value_column("dist_")
+
+ // alt_naming
+ //
+ // Note that since no real packages with alternative buildfile naming use
+ // conditional dependencies yet, we can just set alt_naming to false
+ // during migration to the database schema version 20. Also we never rely
+ // on alt_naming to be nullopt for the stub packages, so let's not
+ // complicate things and set alt_naming to false for them either.
+ //
+ #pragma db member(alt_naming) default(false)
+
+ // *_build
+ //
+ // Note that since no real packages use conditional dependencies yet, we
+ // can just set bootstrap_build to the empty string during migration to
+ // the database schema version 15. Also we never rely on bootstrap_build
+ // to be nullopt for the stub packages, so let's not complicate things and
+ // set bootstrap_build to the empty string for them either.
+ //
+ #pragma db member(bootstrap_build) default("")
+
+ // buildfiles
+ //
+ #pragma db member(buildfiles) id_column("") value_column("")
+
private:
friend class odb::access;
available_package () = default;
};
+ // The available packages together with the repository fragments they belong
+ // to.
+ //
+ // Note that lazy_shared_ptr is used to also convey the databases the
+ // objects belong to.
+ //
+ using available_packages = vector<pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>>;
+
#pragma db view object(available_package)
struct available_package_count
{
@@ -740,55 +983,18 @@ namespace bpkg
shared_ptr<available_package> package;
};
- // Query the available packages that optionally satisfy the specified
- // version constraint and return them in the version descending order, by
- // default. Note that a stub satisfies any constraint.
- //
- odb::result<available_package>
- query_available (database&,
- const package_name&,
- const optional<version_constraint>&,
- bool order = true);
-
- // Only return packages that are in the specified repository fragments, their
- // complements or prerequisites (if prereq is true), recursively. While you
- // could maybe come up with a (barely comprehensible) view/query to achieve
- // this, doing it on the "client side" is definitely more straightforward.
- //
- vector<shared_ptr<available_package>>
- filter (const shared_ptr<repository_fragment>&,
- odb::result<available_package>&&,
- bool prereq = true);
-
- pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- filter_one (const shared_ptr<repository_fragment>&,
- odb::result<available_package>&&,
- bool prereq = true);
-
- shared_ptr<repository_fragment>
- filter (const shared_ptr<repository_fragment>&,
- const shared_ptr<available_package>&,
- bool prereq = true);
-
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- filter (const vector<shared_ptr<repository_fragment>>&,
- odb::result<available_package>&&,
- bool prereq = true);
-
- pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- filter_one (const vector<shared_ptr<repository_fragment>>&,
- odb::result<available_package>&&,
- bool prereq = true);
-
- // Check if there are packages available in the configuration. If that's not
- // the case then print the info message into the diag record or, if it is
- // NULL, print the error message and fail.
+ // Check if there are packages available in the specified configurations. If
+ // that's not the case then print the info message into the diag record or,
+ // if it is NULL, print the error message and fail.
//
void
- check_any_available (const dir_path& configuration,
+ check_any_available (const linked_databases&,
transaction&,
const diag_record* = nullptr);
+ void
+ check_any_available (database&, transaction&, const diag_record* = nullptr);
+
// package_state
//
enum class package_state
@@ -862,17 +1068,97 @@ namespace bpkg
const optional<version_constraint>&,
bool system = false);
+ // Return true if the package is a build2 build system module.
+ //
+ inline bool
+ build2_module (const package_name& name)
+ {
+ return name.string ().compare (0, 10, "libbuild2-") == 0;
+ }
+
// A map of "effective" prerequisites (i.e., pointers to other selected
- // packages) to optional version constraint. Note that because it is a
- // single constraint, we don't support multiple dependencies on the same
- // package (e.g., two ranges of versions). See pkg_configure().
+ // packages) to optional version constraint (plus some other info). Note
+ // that because it is a single constraint, we don't support multiple
+ // dependencies on the same package (e.g., two ranges of versions). See
+ // pkg_configure().
+ //
+ // Note also that the pointer can refer to a selected package in another
+ // database.
//
class selected_package;
+ #pragma db value
+ struct prerequisite_info
+ {
+ // The "tightest" version constraint among all dependencies resolved to
+ // this prerequisite.
+ //
+ optional<version_constraint> constraint;
+
+ // Database mapping.
+ //
+ #pragma db member(constraint) column("")
+ };
+
+ // Note that the keys for this map need to be created with the database
+ // passed to their constructor, which is required for persisting them (see
+ // _selected_package_ref() implementation for details).
+ //
using package_prerequisites = std::map<lazy_shared_ptr<selected_package>,
- optional<version_constraint>,
+ prerequisite_info,
compare_lazy_ptr>;
+ // Database mapping for lazy_shared_ptr<selected_package> to configuration
+ // UUID and package name.
+ //
+ #pragma db value
+ struct _selected_package_ref
+ {
+ using ptr = lazy_shared_ptr<selected_package>;
+
+ uuid configuration;
+ package_name prerequisite;
+
+ explicit
+ _selected_package_ref (const ptr&);
+
+ _selected_package_ref () = default;
+
+ ptr
+ to_ptr (odb::database&) &&;
+
+ #pragma db member(configuration)
+ };
+
+ #pragma db map type(_selected_package_ref::ptr) \
+ as(_selected_package_ref) \
+ to(bpkg::_selected_package_ref (?)) \
+ from(std::move (?).to_ptr (*db))
+
+ enum class config_source
+ {
+ user, // User configuration specified on command line.
+ dependent, // Dependent-imposed configuration from prefer/require clauses.
+ reflect // Package-reflected configuration from reflect clause.
+ };
+
+ string
+ to_string (config_source);
+
+ config_source
+ to_config_source (const string&); // May throw std::invalid_argument.
+
+ #pragma db map type(config_source) as(string) \
+ to(to_string (?)) \
+ from(bpkg::to_config_source (?))
+
+ #pragma db value
+ struct config_variable
+ {
+ string name;
+ config_source source;
+ };
+
#pragma db object pointer(shared_ptr) session
class selected_package
{
@@ -920,27 +1206,65 @@ namespace bpkg
optional<dir_path> src_root;
bool purge_src;
- // The checksum of the manifest file located in the source directory.
+ // The checksum of the manifest file located in the source directory and
+ // the subproject set. Changes to this information should trigger the
+ // package version revision increment. In particular, new subprojects
+ // should trigger the package reconfiguration.
//
- // Must be present if the source directory is present, unless the object
- // is created/updated during the package build simulation (see pkg-build
- // for details). Note that during the simulation the manifest may not be
+ // Only present for external packages, unless the objects are
+ // created/updated during the package build simulation (see pkg-build for
+ // details). Note that during the simulation the manifest may not be
// available.
//
+ // @@ Currently we don't consider subprojects recursively (would most
+ // likely require extension to b info, also could be a performance
+ // concern).
+ //
+ // @@ We should probably rename it if/when ODB adds support for that for
+ // SQLite.
+ //
optional<std::string> manifest_checksum;
- // Path to the output directory of this package, if any. It is
- // always relative to the configuration directory, and is <name>
- // for external packages and <name>-<version> for others. It is
- // only set once the package is configured and its main purse is
- // to keep track of what needs to be cleaned by the user before
- // a broken package can be purged. Note that it could be the
- // same as src_root.
+ // Only present for external packages which have buildfile clauses in the
+ // dependencies, unless the objects are created/updated during the package
+ // build simulation (see pkg-build for details).
+ //
+ // Note that the checksum is always calculated over the files rather than
+ // the *-build manifest values. This is "parallel" to the package skeleton
+ // logic.
+ //
+ optional<std::string> buildfiles_checksum;
+
+ // Path to the output directory of this package, if any. It is always
+ // relative to the configuration directory, and is <name> for external
+ // packages and <name>-<version> for others. It is only set once the
+ // package is configured and its main purpose is to keep track of what
+ // needs to be cleaned by the user before a broken package can be
+ // purged. Note that it could be the same as src_root.
//
optional<dir_path> out_root;
package_prerequisites prerequisites;
+ // 1-based indexes of the selected dependency alternatives which the
+ // prerequisite packages are resolved from. Parallel to the dependencies
+ // member of the respective available package. Entries which don't
+ // correspond to a selected alternative (toolchain build-time dependency,
+ // alternatives that are not enabled, etc) are set to 0.
+ //
+ using indexes_type = vector<size_t>; // Make sure ODB maps it portably.
+ indexes_type dependency_alternatives;
+ odb::section dependency_alternatives_section;
+
+ // Project configuration variable names and their sources.
+ //
+ vector<config_variable> config_variables;
+
+ // SHA256 checksum of variables (names and values) referred to by the
+ // config_variables member.
+ //
+ std::string config_checksum;
+
public:
bool
system () const
@@ -964,18 +1288,39 @@ namespace bpkg
// pkg-unpack --existing <dir>
//
- (repository_fragment.empty () && !archive);
+ // Note that the system package can have no repository associated (see
+ // imaginary system repository in pkg-build.cxx for details).
+ //
+ (repository_fragment.empty () && !archive && !system ());
}
// Represent the wildcard version with the "*" string. Represent naturally
// all other versions.
//
std::string
- version_string () const;
+ version_string () const
+ {
+ return version != wildcard_version ? version.string () : "*";
+ }
std::string
string () const {return package_string (name, version, system ());}
+ std::string
+ string (database&) const;
+
+ // Return the relative archive path completed using the configuration
+ // directory. Return the absolute archive path as is.
+ //
+ path
+ effective_archive (const dir_path& configuration) const
+ {
+ // Cast for compiling with ODB (see above).
+ //
+ assert (static_cast<bool> (archive));
+ return archive->absolute () ? *archive : configuration / *archive;
+ }
+
// Return the relative source directory completed using the configuration
// directory. Return the absolute source directory as is.
//
@@ -988,8 +1333,7 @@ namespace bpkg
return src_root->absolute () ? *src_root : configuration / *src_root;
}
- // Return the output directory using the configuration directory. Note
- // that the output directory is always relative.
+ // Return the output directory using the configuration directory.
//
dir_path
effective_out_root (const dir_path& configuration) const
@@ -997,6 +1341,9 @@ namespace bpkg
// Cast for compiling with ODB (see above).
//
assert (static_cast<bool> (out_root));
+
+ // Note that out_root is always relative.
+ //
return configuration / *out_root;
}
@@ -1004,8 +1351,23 @@ namespace bpkg
//
#pragma db member(name) id
- #pragma db member(prerequisites) id_column("package") \
- key_column("prerequisite") key_not_null value_column("")
+ #pragma db member(prerequisites) id_column("package") \
+ key_column("") value_column("")
+
+ #pragma db member(dependency_alternatives) id_column("package") \
+ value_column("position") section(dependency_alternatives_section)
+
+ #pragma db member(dependency_alternatives_section) load(lazy) update(always)
+
+ #pragma db member(config_variables) id_column("package") value_column("")
+
+ // For the sake of simplicity let's not calculate the checksum during
+ // migration. It seems that the only drawback of this approach is a
+ // (single) spurious reconfiguration of a dependency of a dependent with
+ // configuration clause previously configured by bpkg with the database
+ // schema version prior to 24.
+ //
+ #pragma db member(config_checksum) default("")
// Explicit aggregate initialization for C++20 (private default ctor).
//
@@ -1021,6 +1383,7 @@ namespace bpkg
optional<dir_path> sr,
bool ps,
optional<std::string> mc,
+ optional<std::string> bc,
optional<dir_path> o,
package_prerequisites pps)
: name (move (n)),
@@ -1035,6 +1398,7 @@ namespace bpkg
src_root (move (sr)),
purge_src (ps),
manifest_checksum (move (mc)),
+ buildfiles_checksum (move (bc)),
out_root (move (o)),
prerequisites (move (pps)) {}
@@ -1049,23 +1413,49 @@ namespace bpkg
return os << p.string ();
}
+ // Create a transient (or fake, if you prefer) available_package object
+ // corresponding to the specified selected object, which is expected to not
+ // be in the broken state. Note that the package locations list is left
+ // empty.
+ //
+ shared_ptr<available_package>
+ make_available (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&);
+
+ // Try to find a dependency in the dependency configurations (see
+ // database::dependency_configs() for details). Return pointers to the found
+ // package and the configuration it belongs to. Return a pair of NULLs if no
+ // package is found and issue diagnostics and fail if multiple packages (in
+ // multiple configurations) are found.
+ //
+ pair<shared_ptr<selected_package>, database*>
+ find_dependency (database&, const package_name&, bool buildtime);
+
// Check if the directory containing the specified package version should be
// considered its iteration. Return the version of this iteration if that's
// the case and nullopt otherwise.
//
+ // Pass the build2 project info for the package, if available, to speed up
+ // the call and NULL otherwise (in which case it will be queried by the
+ // implementation). In the former case it is assumed that the package info
+ // has been retrieved with the b_info_flags::subprojects flag.
+ //
// Notes:
//
// - The package directory is considered an iteration of the package if this
// upstream version and revision is already present (selected) in the
- // configuration and has a source directory. If that's the case, then the
- // specified directory path and the checksum of the manifest file it
- // contains are compared to the ones of the package present in the
- // configuration. If both match, then the present package version
- // (including its iteration, if any) is returned. Otherwise (the package
- // has moved and/or the packaging information has changed), the present
- // package version with the incremented iteration number is returned. Note
- // that the directory path is matched only for the external selected
- // packages.
+ // configuration and has a source directory. If that's the case and if the
+ // present version is not external (the package is being switched to a
+ // local potentially amended version), then the present package version
+ // with the incremented iteration number is returned. Otherwise (the
+ // present package is external), the specified directory path and the
+ // package checksum (see package_checksum() for details) are compared to
+ // the ones of the package present in the configuration. If both match,
+ // then the present package version (including its iteration, if any) is
+ // returned. Otherwise (the package has moved and/or the package
+ // information has changed), the present package version with the
+ // incremented iteration number is returned.
//
// - Only a single package iteration is valid per version in the
// configuration. This, in particular, means that a package of the
@@ -1079,15 +1469,16 @@ namespace bpkg
// - The manifest file located in the specified directory is not parsed, and
// so is not checked to match the specified package name and version.
//
- class common_options;
-
+ // Note: loads selected packages.
+ //
optional<version>
package_iteration (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const dir_path&,
const package_name&,
const version&,
+ const package_info*,
bool check_external);
// certificate
@@ -1179,25 +1570,22 @@ namespace bpkg
// Return a list of packages that depend on this package along with
// their constraints.
//
+ // @@ Using raw container table since ODB doesn't support containers in
+ // views yet.
+ //
/*
- #pragma db view object(selected_package) \
- container(selected_package::prerequisites = pp inner: pp.key)
+ #pragma db view container(selected_package::prerequisites = pp)
struct package_dependent
{
- #pragma db column(pp.id)
- string name;
+ #pragma db column("pp.package")
+ package_name name;
- #pragma db column(pp.value)
+ #pragma db column("pp.")
optional<version_constraint> constraint;
};
*/
- // @@ Using raw container table since ODB doesn't support containers
- // in views yet.
- //
- #pragma db view object(selected_package) \
- table("main.selected_package_prerequisites" = "pp" inner: \
- "pp.prerequisite = " + selected_package::name)
+ #pragma db view table("main.selected_package_prerequisites" = "pp")
struct package_dependent
{
#pragma db column("pp.package")
@@ -1207,6 +1595,125 @@ namespace bpkg
optional<version_constraint> constraint;
};
+ // In the specified database query dependents of a dependency that resided
+ // in a potentially different database (yeah, it's a mouthful).
+ //
+ odb::result<package_dependent>
+ query_dependents (database& dependent_db,
+ const package_name& dependency,
+ database& dependency_db);
+
+ // As above but cache the result in a vector. This version should be used if
+ // query_dependents*() may be called recursively.
+ //
+ vector<package_dependent>
+ query_dependents_cache (database&, const package_name&, database&);
+
+ // Database and package name pair.
+ //
+ // It is normally used as a key for maps containing data for packages across
+ // multiple linked configurations. Assumes that the respective databases are
+ // not detached during such map lifetimes. Considers both package name and
+ // database for objects comparison.
+ //
+ struct package_key
+ {
+ reference_wrapper<database> db;
+ package_name name;
+
+ package_key (database& d, package_name n): db (d), name (move (n)) {}
+
+ bool
+ operator== (const package_key& v) const
+ {
+ // See operator==(database, database).
+ //
+ return name == v.name && &db.get () == &v.db.get ();
+ }
+
+ bool
+ operator!= (const package_key& v) const
+ {
+ return !(*this == v);
+ }
+
+ bool
+ operator< (const package_key&) const;
+
+ // Return the package string representation in the form:
+ //
+ // <name>[ <config-dir>]
+ //
+ std::string
+ string () const;
+ };
+
+ inline ostream&
+ operator<< (ostream& os, const package_key& p)
+ {
+ return os << p.string ();
+ }
+
+ // Database, package name, and package version.
+ //
+ // It is normally used as a key for maps containing data for package
+ // versions across multiple linked configurations. Assumes that the
+ // respective databases are not detached during such map lifetimes.
+ // Considers all package name, package version, and database for objects
+ // comparison.
+ //
+ // The package name can be a pseudo-package (command line as a dependent,
+ // etc), in which case the version is absent. The version can also be empty,
+ // denoting a package of an unknown version.
+ //
+ struct package_version_key
+ {
+ reference_wrapper<database> db;
+ package_name name;
+ optional<bpkg::version> version;
+
+ package_version_key (database& d, package_name n, bpkg::version v)
+ : db (d), name (move (n)), version (move (v)) {}
+
+ // Create a pseudo-package (command line as a dependent, etc).
+ //
+ package_version_key (database& d, string n)
+ : db (d),
+ name (move (n), package_name::raw_string) {}
+
+ bool
+ operator== (const package_version_key& v) const
+ {
+ // See operator==(database, database).
+ //
+ return name == v.name &&
+ version == v.version &&
+ &db.get () == &v.db.get ();
+ }
+
+ bool
+ operator!= (const package_version_key& v) const
+ {
+ return !(*this == v);
+ }
+
+ bool
+ operator< (const package_version_key&) const;
+
+ // Return the package string representation in the form:
+ //
+ // <name>[/<version>] [ <config-dir>]
+ //
+ std::string
+ string (bool ignore_version = false) const;
+ };
+
+ inline ostream&
+ operator<< (ostream& os, const package_version_key& p)
+ {
+ return os << p.string ();
+ }
+
// Return a count of repositories that contain this repository fragment.
//
#pragma db view table("main.repository_fragments")
@@ -1472,6 +1979,13 @@ namespace bpkg
}
*/
+ inline bool
+ operator< (const available_package_id& x, const available_package_id& y)
+ {
+ int r (x.name.compare (y.name));
+ return r != 0 ? r < 0 : x.version < y.version;
+ }
+
template <typename T1, typename T2>
inline auto
compare_version_gt (const T1& x, const T2& y, bool revision, bool iteration)
diff --git a/bpkg/package.ixx b/bpkg/package.ixx
index a870eb8..9c85407 100644
--- a/bpkg/package.ixx
+++ b/bpkg/package.ixx
@@ -11,4 +11,23 @@ namespace bpkg
version (v)
{
}
+
+ template <typename T>
+ inline bool
+ has_buildfile_clause (const vector<T>& ds)
+ {
+ for (const dependency_alternatives& das: ds)
+ {
+ for (const dependency_alternative& da: das)
+ {
+ // Note: the accept clause cannot be present if the prefer clause is
+ // absent.
+ //
+ if (da.enable || da.reflect || da.prefer || da.require)
+ return true;
+ }
+ }
+
+ return false;
+ }
}
diff --git a/bpkg/package.xml b/bpkg/package.xml
index e54829c..8020ff3 100644
--- a/bpkg/package.xml
+++ b/bpkg/package.xml
@@ -1,15 +1,45 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="sqlite" version="1">
- <changeset version="8">
- <alter-table name="main.repository">
- <add-column name="local" type="INTEGER" null="true"/>
+ <changeset version="26">
+ <alter-table name="main.available_package_tests">
+ <add-column name="test_enable" type="TEXT" null="true"/>
</alter-table>
</changeset>
- <changeset version="7">
- <alter-table name="main.available_package_dependencies">
+ <changeset version="25">
+ <alter-table name="main.selected_package_prerequisites">
+ <drop-column name="config_dependency_index"/>
+ <drop-column name="config_alternative_index"/>
+ </alter-table>
+ <add-table name="main.selected_package_dependency_alternatives" kind="container">
+ <column name="package" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="position" type="INTEGER" null="true"/>
+ <foreign-key name="package_fk" on-delete="CASCADE">
+ <column name="package"/>
+ <references table="main.selected_package">
+ <column name="name"/>
+ </references>
+ </foreign-key>
+ <index name="selected_package_dependency_alternatives_package_i">
+ <column name="package"/>
+ </index>
+ <index name="selected_package_dependency_alternatives_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="24">
+ <alter-table name="main.selected_package">
+ <add-column name="config_checksum" type="TEXT" null="true" default="''"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="23">
+ <alter-table name="main.available_package">
<add-column name="type" type="TEXT" null="true"/>
</alter-table>
- <add-table name="main.available_package_tests" kind="container">
+ <add-table name="main.available_package_languages" kind="container">
<column name="name" type="TEXT" null="true" options="COLLATE NOCASE"/>
<column name="version_epoch" type="INTEGER" null="true"/>
<column name="version_canonical_upstream" type="TEXT" null="true"/>
@@ -17,24 +47,8 @@
<column name="version_revision" type="INTEGER" null="true"/>
<column name="version_iteration" type="INTEGER" null="true"/>
<column name="index" type="INTEGER" null="true"/>
- <column name="test_name" type="TEXT" null="true" options="COLLATE NOCASE"/>
- <column name="test_min_version_epoch" type="INTEGER" null="true"/>
- <column name="test_min_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="test_min_version_canonical_release" type="TEXT" null="true"/>
- <column name="test_min_version_revision" type="INTEGER" null="true"/>
- <column name="test_min_version_iteration" type="INTEGER" null="true"/>
- <column name="test_min_version_upstream" type="TEXT" null="true"/>
- <column name="test_min_version_release" type="TEXT" null="true"/>
- <column name="test_max_version_epoch" type="INTEGER" null="true"/>
- <column name="test_max_version_canonical_upstream" type="TEXT" null="true"/>
- <column name="test_max_version_canonical_release" type="TEXT" null="true"/>
- <column name="test_max_version_revision" type="INTEGER" null="true"/>
- <column name="test_max_version_iteration" type="INTEGER" null="true"/>
- <column name="test_max_version_upstream" type="TEXT" null="true"/>
- <column name="test_max_version_release" type="TEXT" null="true"/>
- <column name="test_min_open" type="INTEGER" null="true"/>
- <column name="test_max_open" type="INTEGER" null="true"/>
- <column name="test_type" type="TEXT" null="true"/>
+ <column name="language_name" type="TEXT" null="true"/>
+ <column name="language_impl" type="INTEGER" null="true"/>
<foreign-key name="object_id_fk" on-delete="CASCADE">
<column name="name"/>
<column name="version_epoch"/>
@@ -51,7 +65,7 @@
<column name="version_iteration"/>
</references>
</foreign-key>
- <index name="available_package_tests_object_id_i">
+ <index name="available_package_languages_object_id_i">
<column name="name"/>
<column name="version_epoch"/>
<column name="version_canonical_upstream"/>
@@ -59,13 +73,239 @@
<column name="version_revision"/>
<column name="version_iteration"/>
</index>
- <index name="available_package_tests_index_i">
+ <index name="available_package_languages_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="22">
+ <alter-table name="main.available_package">
+ <add-column name="upstream_version" type="TEXT" null="true"/>
+ <add-column name="project" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="21">
+ <add-table name="main.available_package_distribution_values" kind="container">
+ <column name="name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="version_epoch" type="INTEGER" null="true"/>
+ <column name="version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="version_canonical_release" type="TEXT" null="true" options="COLLATE BINARY"/>
+ <column name="version_revision" type="INTEGER" null="true"/>
+ <column name="version_iteration" type="INTEGER" null="true"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="dist_name" type="TEXT" null="true"/>
+ <column name="dist_value" type="TEXT" null="true"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ <references table="main.available_package">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </references>
+ </foreign-key>
+ <index name="available_package_distribution_values_object_id_i">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </index>
+ <index name="available_package_distribution_values_index_i">
<column name="index"/>
</index>
</add-table>
</changeset>
- <model version="6">
+ <changeset version="20">
+ <alter-table name="main.available_package">
+ <add-column name="alt_naming" type="INTEGER" null="true" default="0"/>
+ </alter-table>
+ <add-table name="main.available_package_buildfiles" kind="container">
+ <column name="name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="version_epoch" type="INTEGER" null="true"/>
+ <column name="version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="version_canonical_release" type="TEXT" null="true" options="COLLATE BINARY"/>
+ <column name="version_revision" type="INTEGER" null="true"/>
+ <column name="version_iteration" type="INTEGER" null="true"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="path" type="TEXT" null="true"/>
+ <column name="content" type="TEXT" null="true"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ <references table="main.available_package">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </references>
+ </foreign-key>
+ <index name="available_package_buildfiles_object_id_i">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </index>
+ <index name="available_package_buildfiles_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="19">
+ <alter-table name="main.selected_package_prerequisites">
+ <add-column name="config_dependency_index" type="INTEGER" null="true" default="0"/>
+ <add-column name="config_alternative_index" type="INTEGER" null="true" default="0"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="18">
+ <add-table name="main.selected_package_config_variables" kind="container">
+ <column name="package" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="name" type="TEXT" null="true"/>
+ <column name="source" type="TEXT" null="true"/>
+ <foreign-key name="package_fk" on-delete="CASCADE">
+ <column name="package"/>
+ <references table="main.selected_package">
+ <column name="name"/>
+ </references>
+ </foreign-key>
+ <index name="selected_package_config_variables_package_i">
+ <column name="package"/>
+ </index>
+ <index name="selected_package_config_variables_index_i">
+ <column name="index"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <changeset version="17">
+ <alter-table name="main.selected_package">
+ <add-column name="buildfiles_checksum" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="16">
+ <alter-table name="main.available_package_tests">
+ <add-column name="test_reflect" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="15">
+ <alter-table name="main.available_package">
+ <add-column name="bootstrap_build" type="TEXT" null="true" default="''"/>
+ <add-column name="root_build" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="14">
+ <alter-table name="main.available_package_dependencies">
+ <drop-column name="conditional"/>
+ </alter-table>
+ <alter-table name="main.available_package_dependency_alternatives">
+ <add-column name="reflect" type="TEXT" null="true"/>
+ <add-column name="prefer" type="TEXT" null="true"/>
+ <add-column name="accept" type="TEXT" null="true"/>
+ <add-column name="require" type="TEXT" null="true"/>
+ </alter-table>
+ </changeset>
+
+ <changeset version="13">
+ <alter-table name="main.available_package_dependency_alternatives">
+ <add-column name="enable" type="TEXT" null="true"/>
+ <drop-column name="dep_name"/>
+ <drop-column name="dep_min_version_epoch"/>
+ <drop-column name="dep_min_version_canonical_upstream"/>
+ <drop-column name="dep_min_version_canonical_release"/>
+ <drop-column name="dep_min_version_revision"/>
+ <drop-column name="dep_min_version_iteration"/>
+ <drop-column name="dep_min_version_upstream"/>
+ <drop-column name="dep_min_version_release"/>
+ <drop-column name="dep_max_version_epoch"/>
+ <drop-column name="dep_max_version_canonical_upstream"/>
+ <drop-column name="dep_max_version_canonical_release"/>
+ <drop-column name="dep_max_version_revision"/>
+ <drop-column name="dep_max_version_iteration"/>
+ <drop-column name="dep_max_version_upstream"/>
+ <drop-column name="dep_max_version_release"/>
+ <drop-column name="dep_min_open"/>
+ <drop-column name="dep_max_open"/>
+ </alter-table>
+ <add-table name="main.available_package_dependency_alternative_dependencies" kind="container">
+ <column name="name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="version_epoch" type="INTEGER" null="true"/>
+ <column name="version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="version_canonical_release" type="TEXT" null="true" options="COLLATE BINARY"/>
+ <column name="version_revision" type="INTEGER" null="true"/>
+ <column name="version_iteration" type="INTEGER" null="true"/>
+ <column name="dependency_index" type="INTEGER" null="true"/>
+ <column name="alternative_index" type="INTEGER" null="true"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="dep_name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="dep_min_version_epoch" type="INTEGER" null="true"/>
+ <column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="dep_min_version_canonical_release" type="TEXT" null="true"/>
+ <column name="dep_min_version_revision" type="INTEGER" null="true"/>
+ <column name="dep_min_version_iteration" type="INTEGER" null="true"/>
+ <column name="dep_min_version_upstream" type="TEXT" null="true"/>
+ <column name="dep_min_version_release" type="TEXT" null="true"/>
+ <column name="dep_max_version_epoch" type="INTEGER" null="true"/>
+ <column name="dep_max_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="dep_max_version_canonical_release" type="TEXT" null="true"/>
+ <column name="dep_max_version_revision" type="INTEGER" null="true"/>
+ <column name="dep_max_version_iteration" type="INTEGER" null="true"/>
+ <column name="dep_max_version_upstream" type="TEXT" null="true"/>
+ <column name="dep_max_version_release" type="TEXT" null="true"/>
+ <column name="dep_min_open" type="INTEGER" null="true"/>
+ <column name="dep_max_open" type="INTEGER" null="true"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ <references table="main.available_package">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </references>
+ </foreign-key>
+ <index name="available_package_dependency_alternative_dependencies_object_id_i">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </index>
+ </add-table>
+ </changeset>
+
+ <model version="12">
<table name="main.repository_fragment" kind="object">
<column name="name" type="TEXT" null="true"/>
<column name="url" type="TEXT" null="true"/>
@@ -117,6 +357,7 @@
<column name="url" type="TEXT" null="true"/>
<column name="type" type="TEXT" null="true"/>
<column name="certificate" type="TEXT" null="true"/>
+ <column name="local" type="INTEGER" null="true"/>
<primary-key>
<column name="name"/>
</primary-key>
@@ -215,6 +456,7 @@
<column name="conditional" type="INTEGER" null="true"/>
<column name="buildtime" type="INTEGER" null="true"/>
<column name="comment" type="TEXT" null="true"/>
+ <column name="type" type="TEXT" null="true"/>
<foreign-key name="object_id_fk" on-delete="CASCADE">
<column name="name"/>
<column name="version_epoch"/>
@@ -338,6 +580,7 @@
<column name="max_version_release" type="TEXT" null="true"/>
<column name="min_open" type="INTEGER" null="true"/>
<column name="max_open" type="INTEGER" null="true"/>
+ <column name="configuration" type="TEXT" null="true"/>
<foreign-key name="package_fk" on-delete="CASCADE">
<column name="package"/>
<references table="main.selected_package">
@@ -347,12 +590,6 @@
<index name="selected_package_prerequisites_package_i">
<column name="package"/>
</index>
- <foreign-key name="prerequisite_fk" deferrable="DEFERRED">
- <column name="prerequisite"/>
- <references table="main.selected_package">
- <column name="name"/>
- </references>
- </foreign-key>
</table>
<table name="main.certificate" kind="object">
<column name="id" type="TEXT" null="true"/>
@@ -366,5 +603,80 @@
<column name="id"/>
</primary-key>
</table>
+ <table name="main.available_package_tests" kind="container">
+ <column name="name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="version_epoch" type="INTEGER" null="true"/>
+ <column name="version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="version_canonical_release" type="TEXT" null="true" options="COLLATE BINARY"/>
+ <column name="version_revision" type="INTEGER" null="true"/>
+ <column name="version_iteration" type="INTEGER" null="true"/>
+ <column name="index" type="INTEGER" null="true"/>
+ <column name="test_name" type="TEXT" null="true" options="COLLATE NOCASE"/>
+ <column name="test_min_version_epoch" type="INTEGER" null="true"/>
+ <column name="test_min_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="test_min_version_canonical_release" type="TEXT" null="true"/>
+ <column name="test_min_version_revision" type="INTEGER" null="true"/>
+ <column name="test_min_version_iteration" type="INTEGER" null="true"/>
+ <column name="test_min_version_upstream" type="TEXT" null="true"/>
+ <column name="test_min_version_release" type="TEXT" null="true"/>
+ <column name="test_max_version_epoch" type="INTEGER" null="true"/>
+ <column name="test_max_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="test_max_version_canonical_release" type="TEXT" null="true"/>
+ <column name="test_max_version_revision" type="INTEGER" null="true"/>
+ <column name="test_max_version_iteration" type="INTEGER" null="true"/>
+ <column name="test_max_version_upstream" type="TEXT" null="true"/>
+ <column name="test_max_version_release" type="TEXT" null="true"/>
+ <column name="test_min_open" type="INTEGER" null="true"/>
+ <column name="test_max_open" type="INTEGER" null="true"/>
+ <column name="test_type" type="TEXT" null="true"/>
+ <column name="test_buildtime" type="INTEGER" null="true" default="0"/>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ <references table="main.available_package">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </references>
+ </foreign-key>
+ <index name="available_package_tests_object_id_i">
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <column name="version_iteration"/>
+ </index>
+ <index name="available_package_tests_index_i">
+ <column name="index"/>
+ </index>
+ </table>
+ <table name="main.configuration" kind="object">
+ <column name="id" type="INTEGER" null="true"/>
+ <column name="uuid" type="TEXT" null="true"/>
+ <column name="name" type="TEXT" null="true"/>
+ <column name="type" type="TEXT" null="true"/>
+ <column name="path" type="TEXT" null="true"/>
+ <column name="explicit" type="INTEGER" null="true"/>
+ <primary-key auto="true">
+ <column name="id"/>
+ </primary-key>
+ <index name="configuration_uuid_i" type="UNIQUE">
+ <column name="uuid"/>
+ </index>
+ <index name="configuration_name_i" type="UNIQUE">
+ <column name="name"/>
+ </index>
+ <index name="configuration_path_i" type="UNIQUE">
+ <column name="path"/>
+ </index>
+ </table>
</model>
</changelog>
diff --git a/bpkg/pkg-bindist.cli b/bpkg/pkg-bindist.cli
new file mode 100644
index 0000000..1401723
--- /dev/null
+++ b/bpkg/pkg-bindist.cli
@@ -0,0 +1,908 @@
+// file : bpkg/pkg-bindist.cli
+// license : MIT; see accompanying LICENSE file
+
+include <map>;
+
+include <bpkg/configuration.cli>;
+
+"\section=1"
+"\name=bpkg-pkg-bindist"
+"\summary=generate binary distribution package"
+
+namespace bpkg
+{
+ {
+ "<options> <dir> <vars> <pkg>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{bpkg pkg-bindist}|\b{bindist} [\b{--output-root}|\b{-o} <dir>] [<options>] [<vars>] <pkg>...}
+
+ \h|DESCRIPTION|
+
+ The \cb{pkg-bindist} command generates a binary distribution package for
+ the specified package. If additional packages are specified, then they
+ are bundled in the same distribution package. All the specified packages
+ must have been previously configured with \l{bpkg-pkg-build(1)} or
+ \l{bpkg-pkg-configure(1)}. For some system package managers a directory
+ for intermediate files and subdirectories as well as the resulting binary
+ package may have to be specified explicitly with the
+ \c{\b{--output-root}|\b{-o}} option.
+
+ Underneath, this command roughly performs the following steps: First it
+ installs the specified packages similar to the \l{bpkg-pkg-install(1)}
+ command except that it may override the installation locations (via the
+ \cb{config.install.*} variables) to match the distribution's layout. Then
+ it generates any necessary distribution package metadata files based on
+ the information from the package \cb{manifest} files. Finally, it invokes
+ the distribution-specific command to produce the binary package. Unless
+ overridden with the \cb{--architecture} and \cb{--distribution} options,
+ the binary package is generated for the host architecture using the
+ host's standard system package manager. Additional command line variables
+ (<vars>, normally \cb{config.*}) can be passed to the build system during
+ the installation step. See the following distribution-specific
+ description sections below for details and invocation examples:
+
+ \l{#debian DEBIAN DESCRIPTION}
+
+ \l{#fedora FEDORA DESCRIPTION}
+
+ \l{#archive ARCHIVE DESCRIPTION}
+
+ The specified packages may have dependencies and the default behavior is
+ to not bundle them but rather to specify them as dependencies in the
+ corresponding distribution package metadata, if applicable. This default
+ behavior can be overridden with the \cb{--recursive} option (see the
+ option description for the available modes). Note, however, that
+ dependencies that are satisfied by system packages are always specified
+ as dependencies in the distribution package metadata (if applicable).
+ "
+ }
+
+ // Place distribution-specific options into separate classes in case one day
+ // we want to only pass their own options to each implementation.
+ //
+ class pkg_bindist_common_options: configuration_options
+ {
+ "\h|PKG-BINDIST OPTIONS|
+
+ See the following sections below for distribution-specific options:
+
+ \l{#debian-options PKG-BINDIST DEBIAN OPTIONS}
+
+ \l{#fedora-options PKG-BINDIST FEDORA OPTIONS}
+
+ \l{#archive-options PKG-BINDIST ARCHIVE OPTIONS}
+ "
+
+ string --distribution
+ {
+ "<name>",
+ "Alternative system/distribution package manager to generate the binary
+ package for. The valid <name> values are \cb{debian} (Debian and alike,
+ such as Ubuntu, etc), \cb{fedora} (Fedora and alike, such as RHEL,
+ CentOS, etc), and \cb{archive} (installation archive on any operating
+ system). Note that some package managers may only be supported when
+ running on certain host operating systems."
+ }
+
+ string --architecture
+ {
+ "<name>",
+ "Alternative architecture to generate the binary package for. The
+ valid <name> values are system/distribution package manager-specific.
+ If unspecified, the host architecture is used."
+ }
+
+ string --recursive = "none"
+ {
+ "<mode>",
+ "Bundle or generate dependencies of the specified packages. The <mode>
+ value can be either \cb{auto}, in which case only the required files
+ from each dependency package are bundled, \cb{full}, in which case
+ all the files are bundled, or \cb{separate}, in which case a separate
+ binary package is generated for each non-system dependency. It can
+ also be \cb{none} which is equivalent to not specifying this option
+ (primarily useful for overriding a previously-specified value).
+
+ Specifically, in the \cb{auto} mode any required files, such as shared
+ libraries, are pulled implicitly by the \cb{install} build system
+ operation, for example, as part of installing an executable from one of
+ the specified packages. In contrast, in the \cb{full} mode, each
+ dependency package is installed explicitly and completely, as if they
+       were specified as additional packages on the command line. The
+ \cb{separate} mode is equivalent to invoking the \cb{pkg-bindist}
+ command on each dependency package. See also the \cb{--private} option."
+ }
+
+ bool --private
+ {
+ "Enable the private installation subdirectory functionality using the
+ package name as the private subdirectory. This is primarily useful when
+ bundling dependencies, such as shared libraries, of an executable that
+ is being installed into a shared location, such as \cb{/usr/}. See the
+ \cb{config.install.private} configuration variable documentation in the
+ build system manual for details. This option only makes sense together
+ with the \cb{--recursive} option \cb{auto} and \cb{full} modes."
+ }
+
+ dir_path --output-root|-o
+ {
+ "<dir>",
+ "Directory for intermediate files and subdirectories as well as the
+ resulting binary package. Note that this option may be required for
+ some system package managers and may not be specified for others."
+ }
+
+ bool --wipe-output
+ {
+ "Wipe the output root directory (either specified with \ci{--output-root}
+ or system package manager-specific) clean before using it to generate
+ the binary package."
+ }
+
+ bool --keep-output
+ {
+ "Keep intermediate files in the output root directory (either specified
+ with \ci{--output-root} or system package manager-specific) that were
+ used to generate the binary package. This is primarily useful for
+ troubleshooting."
+ }
+
+ bool --allow-dependent-config
+ {
+ "Allow configuration that is imposed by dependent packages. Normally
+ this is undesirable because the resulting binary packages become
+       configured specifically for particular dependent packages."
+ }
+
+ string --os-release-id
+ {
+ "<v>",
+ "Override the \cb{ID} component in \cb{os-release(5)} or equivalent.
+ Note that unlike the rest of the \cb{--os-release-*} options, this
+ option suppresses automatic detection of the host operating system
+ information."
+ }
+
+ string --os-release-version-id
+ {
+ "<v>",
+ "Override the \cb{VERSION_ID} component in \cb{os-release(5)} or
+ equivalent."
+ }
+
+ string --os-release-name
+ {
+ "<v>",
+ "Override the \cb{NAME} component in \cb{os-release(5)} or equivalent."
+ }
+ };
+
+ class pkg_bindist_debian_options
+ {
+ "\h#debian|DEBIAN DESCRIPTION|
+
+ The Debian binary packages are generated by producing the standard
+ \cb{debian/control}, \cb{debian/rules}, and other package metadata files
+ and then invoking \cb{dpkg-buildpackage(1)} to build the binary package
+    from that. In particular, the \cb{debian/rules} implementation is based on
+ the \cb{dh(1)} command sequencer. While this approach is normally used to
+ build packages from source, this implementation \"pretends\" that this is
+ what's happening by overriding a number of \cb{dh} targets to invoke the
+ \cb{build2} build system on the required packages directly in their
+ \cb{bpkg} configuration locations.
+
+ The \cb{dpkg-dev} (or \cb{build-essential}) and \cb{debhelper} Debian
+ packages must be installed before invocation. Typical invocation:
+
+ \
+ bpkg build libhello
+ bpkg test libhello
+ bpkg bindist -o /tmp/output/ libhello
+ \
+
+ Unless the \cb{--recursive} option \cb{auto} or \cb{full} modes are
+ specified, dependencies of the specified package are translated to
+ dependencies in the resulting binary package using names and versions
+ that refer to packages that would be generated by the \cb{pkg-bindist}
+ command (called \"non-native\" packages). If instead you would like
+ certain dependencies to refer to binary packages provided by the
+ distribution (called \"native\" packages), then you need to arrange for
+ them to be built as system (see \l{bpkg-pkg-build(1)} for details). For
+ example, if our \cb{libhello} has a dependency on \cb{libsqlite3} and we
+ would like the binary package for \cb{libhello} to refer to
+ \cb{libsqlite3} from Debian (or alike), then the \cb{pkg-build} command
+ would need to be (\cb{--sys-install} is optional):
+
+ \
+ bpkg build --sys-install libhello ?sys:libsqlite3
+ \
+
+ Such a package with native dependencies can then be installed (including
+ any missing native dependencies) using the \cb{apt} or \cb{apt-get}
+ \cb{install} command. Note that the specified \cb{.deb} file must include
+ a directory separator (\c{/}) in order to be recognized as a file rather
+ than a package name. For example:
+
+ \
+ sudo apt-get install ./libhello_1.2.3-0~debian11_amd64.deb \
+ ./libhello-dev_1.2.3-0~debian11_amd64.deb
+ \
+
+ See \l{bpkg#bindist-mapping-debian-produce Debian Package Mapping for
+ Production} for details on \cb{bpkg} to Debian package name and version
+ mapping.
+ "
+
+ "\h#debian-options|PKG-BINDIST DEBIAN OPTIONS|"
+
+ bool --debian-prepare-only
+ {
+ "Prepare all the package metadata files (\cb{control}, \cb{rules}, etc)
+ but do not invoke \cb{dpkg-buildpackage} to generate the binary
+ package, printing its command line instead unless requested to be
+ quiet. Implies \cb{--keep-output}."
+ }
+
+ string --debian-buildflags = "assign"
+ {
+ "<mode>",
+ "Package build flags (\cb{dpkg-buildflags}) usage mode. Valid <mode>
+ values are \cb{assign} (use the build flags instead of configured),
+ \cb{append} (use the build flags in addition to configured, putting
+ them last), \cb{prepend} (use the build flags in addition to
+ configured, putting them first), and \cb{ignore} (ignore build
+ flags). The default mode is \cb{assign}. Note that compiler mode
+ options, if any, are used as configured."
+ }
+
+ strings --debian-maint-option
+ {
+ "<o>",
+ "Alternative options to specify in the \cb{DEB_BUILD_MAINT_OPTIONS}
+ variable of the \cb{rules} file. To specify multiple maintainer options
+ repeat this option and/or specify them as a single value separated
+ with spaces."
+ }
+
+ strings --debian-build-option
+ {
+ "<o>",
+ "Additional option to pass to the \cb{dpkg-buildpackage} program. Repeat
+ this option to specify multiple build options."
+ }
+
+ string --debian-build-meta
+ {
+ "<data>",
+ "Alternative or additional build metadata to include in the binary
+ package version. If the specified value starts/ends with \cb{+} then
+ the value (with \cb{+} removed) is added after/before the default
+ metadata. Otherwise it is used as is instead of the default metadata.
+ If empty value is specified, then no build metadata is included. By
+ default, the build metadata is the \cb{ID} and \cb{VERSION_ID}
+ components from \cb{os-release(5)}, for example, \cb{debian10} in
+ version \cb{1.2.3-0~debian10}. See also \cb{--os-release-*}."
+ }
+
+ string --debian-section
+ {
+ "<v>",
+ "Alternative \cb{Section} \cb{control} file field value for the main
+ binary package. The default is either \cb{libs} or \cb{devel},
+ depending on the package type."
+ }
+
+ string --debian-priority
+ {
+ "<v>",
+ "Alternative \cb{Priority} \cb{control} file field value. The default
+ is \cb{optional}."
+ }
+
+ string --debian-maintainer
+ {
+ "<v>",
+ "Alternative \cb{Maintainer} \cb{control} file field value. The
+ default is the \cb{package-email} value from package \cb{manifest}."
+ }
+
+ string --debian-architecture
+ {
+ "<v>",
+ "Alternative \cb{Architecture} \cb{control} file field value for
+ the main binary package, normally \cb{all} (architecture-independent).
+ The default is \cb{any} (architecture-dependent)."
+ }
+
+ string --debian-main-langdep
+ {
+ "<v>",
+ "Override the language runtime dependencies (such as \cb{libc6},
+ \cb{libstdc++6}, etc) in the \cb{Depends} \cb{control} file field
+ value of the main binary package."
+ }
+
+ string --debian-dev-langdep
+ {
+ "<v>",
+ "Override the language runtime dependencies (such as \cb{libc-dev},
+ \cb{libstdc++-dev}, etc) in the \cb{Depends} \cb{control} file field
+ value of the development (\cb{-dev}) binary package."
+ }
+
+ string --debian-main-extradep
+ {
+ "<v>",
+ "Extra dependencies to add to the \cb{Depends} \cb{control} file field
+ value of the main binary package."
+ }
+
+ string --debian-dev-extradep
+ {
+ "<v>",
+ "Extra dependencies to add to the \cb{Depends} \cb{control} file field
+ value of the development (\cb{-dev}) binary package."
+ }
+ };
+
+ class pkg_bindist_fedora_options
+ {
+ "\h#fedora|FEDORA DESCRIPTION|
+
+ The Fedora binary packages are generated by producing the standard RPM
+ spec file and then invoking \cb{rpmbuild(8)} to build the binary package
+ from that. While this approach is normally used to build packages from
+ source, this implementation \"pretends\" that this is what's happening by
+ overriding a number of RPM spec file sections to invoke the \cb{build2}
+ build system on the required packages directly in their \cb{bpkg}
+ configuration locations.
+
+ The \cb{rpmdevtools} Fedora package must be installed before invocation.
+ Typical invocation:
+
+ \
+ bpkg build libhello
+ bpkg test libhello
+ bpkg bindist libhello
+ \
+
+ The resulting binary packages are placed into the standard \cb{rpmbuild}
+ output directory (normally \c{\b{~/rpmbuild/RPMS/}\i{arch}\b{/}}).
+
+ Unless the \cb{--recursive} option \cb{auto} or \cb{full} modes are
+ specified, dependencies of the specified package are translated to
+ dependencies in the resulting binary package using names and versions
+ that refer to packages that would be generated by the \cb{pkg-bindist}
+ command (called \"non-native\" packages). If instead you would like
+ certain dependencies to refer to binary packages provided by the
+ distribution (called \"native\" packages), then you need to arrange for
+ them to be built as system (see \l{bpkg-pkg-build(1)} for details). For
+ example, if our \cb{libhello} has a dependency on \cb{libsqlite3} and we
+ would like the binary package for \cb{libhello} to refer to
+ \cb{sqlite-libs} from Fedora (or alike), then the \cb{pkg-build} command
+ would need to be (\cb{--sys-install} is optional):
+
+ \
+ bpkg build --sys-install libhello ?sys:libsqlite3
+ \
+
+ Such a package with native dependencies can then be installed (including
+ any missing native dependencies) using the \cb{dnf install} command.
+ For example:
+
+ \
+ sudo dnf install libhello-1.2.3-1.fc35.x86_64.rpm \
+ libhello-devel-1.2.3-1.fc35.x86_64.rpm
+ \
+
+ See \l{bpkg#bindist-mapping-fedora-produce Fedora Package Mapping for
+ Production} for details on \cb{bpkg} to Fedora package name and version
+ mapping.
+ "
+
+ "\h#fedora-options|PKG-BINDIST FEDORA OPTIONS|"
+
+ bool --fedora-prepare-only
+ {
+ "Prepare the RPM spec file but do not invoke \cb{rpmbuild} to generate
+ the binary package, printing its command line instead unless requested
+ to be quiet."
+ }
+
+ string --fedora-buildflags = "assign"
+ {
+ "<mode>",
+ "Package build flags (\cb{%{build_*flags\}} macros) usage mode. Valid
+ <mode> values are \cb{assign} (use the build flags instead of
+ configured), \cb{append} (use the build flags in addition to
+ configured, putting them last), \cb{prepend} (use the build flags in
+ addition to configured, putting them first), and \cb{ignore} (ignore
+ build flags). The default mode is \cb{assign}. Note that compiler mode
+ options, if any, are used as configured."
+ }
+
+ strings --fedora-build-option
+ {
+ "<o>",
+ "Additional option to pass to the \cb{rpmbuild} program. If specified,
+ these options must be consistent with the query options
+ (\cb{--fedora-query-option}) to result in identical macro
+ expansions. Repeat this option to specify multiple build options."
+ }
+
+ strings --fedora-query-option
+ {
+ "<o>",
+ "Additional option to pass to the \cb{rpm} program. This program is used
+ to query RPM macro values which affect the binary package. If
+ specified, these options must be consistent with the build options
+ (\cb{--fedora-build-option}) to result in identical macro expansions.
+ Repeat this option to specify multiple query options."
+ }
+
+ string --fedora-dist-tag
+ {
+ "<tag>",
+ "Alternative or additional distribution tag to use in the binary package
+ release. If the specified value starts/ends with \cb{+} then the value
+ (with \cb{+} removed) is added after/before the default distribution
+ tag. Otherwise it is used as is instead of the default tag. If empty
+ value is specified, then no distribution tag is included. The default
+ is a value that identifies the distribution being used to build the
+ package, for example, \cb{fc35} for Fedora 35 or \cb{el8} for RHEL 8."
+ }
+
+ string --fedora-packager
+ {
+ "<v>",
+ "Alternative \cb{Packager} RPM spec file directive value. The default is
+ the \cb{package-email} value from package \cb{manifest}. If empty value
+ is specified, then the \cb{Packager} directive is omitted from the spec
+ file."
+ }
+
+ string --fedora-build-arch
+ {
+ "<v>",
+ "\cb{BuildArch} RPM spec file directive value for the main binary
+ package, normally \cb{noarch} (architecture-independent). By default
+ the directive is omitted, assuming that the package is
+ architecture-dependent."
+ }
+
+ strings --fedora-main-langreq
+ {
+ "<v>",
+ "Override the language runtime dependencies (such as \cb{glibc},
+ \cb{libstdc++}, etc) of the main binary package by replacing the
+ corresponding \cb{Requires} RPM spec file directives. If empty value is
+ specified then no language runtime dependencies are specified. Repeat
+ this option to specify multiple language runtime dependencies."
+ }
+
+ strings --fedora-devel-langreq
+ {
+ "<v>",
+ "Override the language runtime dependencies (such as \cb{glibc-devel},
+ \cb{libstdc++-devel}, etc) of the development (\cb{-devel}) binary
+ package by replacing the corresponding \cb{Requires} RPM spec file
+ directives. If empty value is specified then no language runtime
+ dependencies are specified. Repeat this option to specify multiple
+ language runtime dependencies."
+ }
+
+ strings --fedora-stat-langreq
+ {
+ "<v>",
+ "Override the language runtime dependencies (such as \cb{glibc-static},
+ \cb{libstdc++-static}, etc) of the static libraries (\cb{-static})
+ binary package by replacing the corresponding \cb{Requires} RPM spec
+ file directives. If empty value is specified then no language runtime
+ dependencies are specified. Repeat this option to specify multiple
+ language runtime dependencies."
+ }
+
+ strings --fedora-main-extrareq
+ {
+ "<v>",
+ "Extra dependency to add to the main binary package as an additional
+ \cb{Requires} RPM spec file directive. Repeat this option to specify
+ multiple extra dependencies."
+ }
+
+ strings --fedora-devel-extrareq
+ {
+ "<v>",
+ "Extra dependency to add to the development (\cb{-devel}) binary package
+ as an additional \cb{Requires} RPM spec file directive. Repeat this
+ option to specify multiple extra dependencies."
+ }
+
+ strings --fedora-stat-extrareq
+ {
+ "<v>",
+ "Extra dependency to add to the static libraries (\cb{-static}) binary
+ package as an additional \cb{Requires} RPM spec file directive. Repeat
+ this option to specify multiple extra dependencies."
+ }
+ };
+
+ class pkg_bindist_archive_options
+ {
+ "\h#archive|ARCHIVE DESCRIPTION|
+
+ The installation archive binary packages are generated by invoking the
+ \cb{build2} build system on the required packages directly in their
+ \cb{bpkg} configuration locations and installing them into the binary
+ package directory using the \cb{config.install.chroot} mechanism. Then
+ this directory is packaged with \cb{tar} or \cb{zip} to produce one or
+ more binary package archives.
+
+ The generation of installation archive packages is never the default and
+ should be requested explicitly with the \cb{--distribution=archive}
+ option. The installation directory layout and the package archives to
+ generate can be specified with the \cb{--archive-install-*} and
+ \cb{--archive-type} options (refer to their documentation for defaults).
+
+ The binary package directory (the top-level directory inside the
+ archive) as well as the archive file base (the file name without
+ the extension) are the same and have the following form:
+
+ \c{\i{package}-\i{version}-\i{build_metadata}}
+
+ Where \ci{package} is the package name and \ci{version} is the \cb{bpkg}
+ package version. Unless overridden with the \cb{--archive-build-meta}
+ option, \ci{build_metadata} has the following form:
+
+ \c{\i{cpu}-\i{os}[-\i{langrt}...]}
+
+ Where \ci{cpu} is the target CPU (for example, \cb{x86_64} or
+ \cb{aarch64}; omitted if \cb{--archive-no-cpu} is specified), \ci{os} is
+ the \cb{ID} and \cb{VERSION_ID} components from \cb{os-release(5)} (or
+ equivalent, for example, \cb{debian11} or \cb{windows10}; omitted if
+ \cb{--archive-no-os} is specified), and \ci{langrt} are the language
+ runtimes as mapped by the \cb{--archive-lang*} options (for example,
+ \cb{gcc12} or \cb{msvc17.4}).
+
+ For example, given the following invocation on Debian 11 running on
+ \cb{x86_64}:
+
+ \
+ bpkg build libhello
+ bpkg test libhello
+ bpkg bindist \
+ -o /tmp/output/ \
+ --distribution=archive \
+ --archive-lang cc=gcc12 \
+ libhello
+ \
+
+ We will end up with the package archive in the following form:
+
+ \
+ libhello-1.2.3-x86_64-debian11-gcc12.tar.xz
+ \
+
+ The recommended language runtime id format is the runtime name followed
+ by the version, for example, \cb{gcc12} or \cb{msvc17.4}. Note that its
+ purpose is not to provide a precise specification of requirements but
+ rather to help the user of a binary package to pick the appropriate
+ variant. Refer to the \cb{--archive-lang*} options documentation for
+ details on the mapping semantics.
+
+ Instead of mapping languages individually you can specify the entire
+ build metadata as a single value with the \cb{--archive-build-meta}
+ option (it is also possible to add additional metadata; see the option
+ documentation for details). For example:
+
+ \
+ bpkg bindist \
+ -o /tmp/output/ \
+ --distribution=archive \
+ --archive-build-meta=x86_64-linux-glibc
+ libhello
+ \
+
+ This will produce the package archive in the following form:
+
+ \
+ libhello-1.2.3-x86_64-linux-glibc.tar.xz
+ \
+
+ To install the binary package from archive simply unpack it using
+ \cb{tar} or \cb{zip}. You can use the \cb{--strip-components} \cb{tar}
+ option to remove the top-level package directory (the same can be
+ achieved for \cb{zip} archives by using \cb{bsdtar} on Windows). For
+ example, to unpack the package contents so that they end up in
+ \cb{/usr/local/}:
+
+ \
+ sudo tar -xf libhello-1.2.3-x86_64-debian11-gcc12.tar.xz \
+ -C / --strip-components=1
+ \
+
+ If you expect the binary package to be unpacked into a directory other
+ than its original installation directory (\cb{--archive-install-root}),
+ then it's recommended to make it relocatable by specifying the
+ \cb{config.install.relocatable=true} configuration variable. For example:
+
+ \
+ bpkg bindist \
+ ... \
+ config.install.relocatable=true \
+ libhello
+ \
+
+ Note that not all source packages support relocatable installation (see
+ \l{b#install-reloc Relocatable Installation} for details).
+
+ Another mechanism that can be useful when generating archive packages is the
+ ability to filter the files being installed. This, for example, can be
+ used to create binary packages that don't contain any development-related
+ files. See \l{b#install-filter Installation Filtering} for details. See
+ also the \cb{--archive-split} option.
+
+ The installation archive package can be generated for a target other than
+ the host by specifying the target triplet with the \cb{--architecture}
+ option. In this case the \cb{bpkg} configuration is assumed to be
+ appropriately configured for cross-compiling to the specified target. You
+ will also need to explicitly specify the \cb{--archive-install-root}
+ option (or \cb{--archive-install-config}) as well as the
+ \cb{--os-release-id} option (and likely want to specify other
+ \cb{--os-release-*} options). For example, for cross-compiling from Linux
+ to Windows using the MinGW GCC toolchain:
+
+ \
+ bpkg bindist \
+ --distribution=archive \
+ --architecture=x86_64-w64-mingw32 \
+ --os-release-id=windows \
+ --os-release-name=Windows \
+ --os-release-version-id=10 \
+ --archive-install-root / \
+ --archive-lang cc=mingw_w64_gcc12 \
+ ...
+ \
+ "
+
+ "\h#archive-options|PKG-BINDIST ARCHIVE OPTIONS|"
+
+ bool --archive-prepare-only
+ {
+ "Prepare all the package contents but do not create the binary package
+ archive, printing its directory instead unless requested to be quiet.
+ Implies \cb{--keep-output}."
+ }
+
+ strings --archive-type
+ {
+ "<ext>",
+ "Archive type to create specified as a file extension, for example,
+ \cb{tar.xz}, \cb{tar.gz}, \cb{tar}, \cb{zip}. Repeat this option to
+ generate multiple archive types. If unspecified, then a default type
+ appropriate for the target operating system is used, currently \cb{zip}
+ for Windows and \cb{tar.xz} for POSIX. Note, however, that these
+ defaults may change in the future."
+ }
+
+ std::multimap<string, string> --archive-lang
+ {
+ "<ln>=<rt>",
+ "Map interface language name <ln> to runtime id <rt>. If no mapping is
+ found for an interface language in this map, then fallback to the
+ \cb{--archive-lang-impl} map. If still no mapping is found, then
+ fail. If the information about an interface language is unimportant and
+ should be ignored, then empty runtime id can be specified. Note that
+ the mapping specified with this option is only considered if the
+ package type is a library (for other package types all languages used
+ are implementation). Note also that multiple runtime ids specified for
+ the same language are combined except for an empty id, which is treated
+ as a request to clear previous entries."
+ }
+
+ std::multimap<string, string> --archive-lang-impl
+ {
+ "<ln>=<rt>",
+ "Map implementation language name <ln> to runtime id <rt>. If no mapping
+ is found for an implementation language in this map, then assume
+ the information about this implementation language is unimportant
+ and ignore it (examples of such cases include static linking as well
+ as a language runtime that is always present). See \cb{--archive-lang}
+ for background."
+ }
+
+ bool --archive-no-cpu
+ {
+ "Assume the package is CPU architecture-independent and omit it from
+ the binary package directory name and archive file base."
+ }
+
+ bool --archive-no-os
+ {
+ "Assume the package is operating system-independent and omit it from
+ the binary package directory name and archive file base."
+ }
+
+ string --archive-build-meta
+ {
+ "<data>",
+ "Alternative or additional build metadata to include after the version
+ in the binary package directory and file names. If the specified value
+ starts/ends with \cb{+} then the value (with \cb{+} removed) is added
+ after/before the default metadata. Otherwise it is used as is instead
+ of the default metadata. If empty value is specified, then no build
+ metadata is included."
+ }
+
+ dir_path --archive-install-root
+ {
+ "<d>",
+ "Alternative installation root directory. The default is \cb{/usr/local/}
+ on POSIX and \c{\b{C:\\}\i{project}\b{\\}} on Windows, where
+ \ci{project} is the \l{bpkg#manifest-package-project \cb{project}}
+ package manifest value."
+ }
+
+ bool --archive-install-config
+ {
+ "Use the installation directory layout (\cb{config.install.*} variables)
+ as configured instead of overriding them with defaults appropriate for
+ the target operating system. Note that this includes
+ \cb{config.install.private} and \cb{config.bin.rpath} if needed for a
+ private installation. Note also that the \cb{config.install.root} value
+ is still overridden with the \cb{--archive-install-root} option value
+ if specified."
+ }
+
+ std::map<string, string> --archive-split
+ {
+ "<key>=<filt>",
+ "Split the installation into multiple binary packages. Specifically,
+ for each <key>=<filt> pair, perform the \cb{install} operation with
+ \c{\b{config.install.filter=}\i{filt}} and package the resulting files
+ as \ci{package-key-version-build_metadata} omitting the \ci{-key} part
+ if <key> is empty. Note that wildcard patterns in <filt> must be
+ quoted. See \l{b#install-filter Installation Filtering} for background."
+ }
+ };
+
+ "
+ \h|STRUCTURED RESULT|
+
+ Instead of printing to \cb{stderr} the list of generated binary packages in
+ a format more suitable for human consumption, the \cb{pkg-bindist} command
+ can be instructed to write it to \cb{stdout} in a machine-readable form by
+ specifying the \cb{--structured-result} option. Currently, the only
+ recognized format value for this option is \cb{json} with the output being
+ a JSON object that is a serialized representation of the following C++
+ struct \cb{bindist_result}:
+
+ \
+ struct os_release
+ {
+ string name_id; // ID
+ vector<string> like_ids; // ID_LIKE
+ optional<string> version_id; // VERSION_ID
+ optional<string> variant_id; // VARIANT_ID
+
+ optional<string> name; // NAME
+ optional<string> version_codename; // VERSION_CODENAME
+ optional<string> variant; // VARIANT
+ };
+
+ struct file
+ {
+ string type;
+ string path;
+ optional<string> system_name;
+ };
+
+ struct package
+ {
+ string name;
+ string version;
+ optional<string> system_version;
+ vector<file> files;
+ };
+
+ struct bindist_result
+ {
+ string distribution; // --distribution or auto-detected
+ string architecture; // --architecture or auto-detected
+ os_release os_release; // --os-release-* or auto-detected
+ optional<string> recursive; // --recursive
+ bool private; // --private
+ bool dependent_config; // See --allow-dependent-config
+
+ package package;
+ vector<package> dependencies; // Only in --recursive=separate
+ };
+ \
+
+ For example:
+
+ \
+ {
+ \"distribution\": \"debian\",
+ \"architecture\": \"amd64\",
+ \"os_release\": {
+ \"name_id\": \"debian\",
+ \"version_id\": \"11\",
+ \"name\": \"Debian GNU/Linux\"
+ },
+ \"package\": {
+ \"name\": \"libfoo\",
+ \"version\": \"2.5.0-b.23\",
+ \"system_version\": \"2.5.0~b.23-0~debian11\",
+ \"files\": [
+ {
+ \"type\": \"main.deb\",
+ \"path\": \"/tmp/libfoo_2.5.0~b.23-0~debian11_amd64.deb\",
+ \"system_name\": \"libfoo\"
+ },
+ {
+ \"type\": \"dev.deb\",
+ \"path\": \"/tmp/libfoo-dev_2.5.0~b.23-0~debian11_amd64.deb\",
+ \"system_name\": \"libfoo-dev\"
+ },
+ ...
+ ]
+ }
+ }
+ \
+
+ See the JSON OUTPUT section in \l{bpkg-common-options(1)} for details on
+ the overall properties of this format and the semantics of the \cb{struct}
+ serialization.
+
+ The \cb{file::type} member is a distribution-specific value that classifies
+ the file. For the \cb{debian} distribution the possible values are
+ \cb{main.deb}, \cb{dev.deb}, \cb{doc.deb}, \cb{common.deb},
+ \cb{dbgsym.deb}, \cb{changes} (\cb{.changes} file), and \cb{buildid}
+ (\cb{.buildid} file); see \l{bpkg#bindist-mapping-debian-produce Debian
+ Package Mapping for Production} for background. For the \cb{fedora}
+ distribution the possible values are \cb{main.rpm}, \cb{devel.rpm},
+ \cb{static.rpm}, \cb{doc.rpm}, \cb{common.rpm}, and \cb{debuginfo.rpm}; see
+ \l{bpkg#bindist-mapping-fedora-produce Fedora Package Mapping for
+ Production} for background. For the \cb{archive} distribution this is the
+ archive type (\cb{--archive-type}), for example, \cb{tar.xz} or \cb{zip},
+ potentially prefixed with \ci{key} if the \cb{--archive-split}
+ functionality is used, for example, \cb{dev.tar.xz}.
+
+ The \cb{package::system_version} and/or \cb{file::system_name} members are
+ absent if not applicable to the distribution. The \cb{file::system_name}
+ member is also absent if the file is not a binary package (for example,
+ \cb{.changes} and \cb{.buildid} files in the \cb{debian} distribution).
+ "
+
+ // NOTE: remember to add the corresponding `--class-doc ...=exclude-base`
+ // (both in bpkg/ and doc/) if adding a new base class.
+ //
+ class pkg_bindist_options: pkg_bindist_common_options,
+ pkg_bindist_debian_options,
+ pkg_bindist_fedora_options,
+ pkg_bindist_archive_options {};
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ See \l{bpkg-default-options-files(1)} for an overview of the default
+ options files. For the \cb{pkg-bindist} command the search start
+ directory is the configuration directory. The following options files are
+ searched for in each directory and, if found, loaded in the order listed:
+
+ \
+ bpkg.options
+ bpkg-pkg-bindist.options
+ \
+
+ The following \cb{pkg-bindist} command options cannot be specified in the
+ default options files:
+
+ \
+ --directory|-d
+ \
+ "
+}
diff --git a/bpkg/pkg-bindist.cxx b/bpkg/pkg-bindist.cxx
new file mode 100644
index 0000000..4639746
--- /dev/null
+++ b/bpkg/pkg-bindist.cxx
@@ -0,0 +1,689 @@
+// file : bpkg/pkg-bindist.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/pkg-bindist.hxx>
+
+#include <list>
+#include <iostream> // cout
+
+#include <libbutl/json/serializer.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/package-query.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/pkg-verify.hxx>
+#include <bpkg/diagnostics.hxx>
+#include <bpkg/system-package-manager.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace bpkg
+{
+ using package = system_package_manager::package;
+ using packages = system_package_manager::packages;
+
+ // Find the available package(s) for the specified selected package.
+ //
+ // Specifically, for non-system packages we look for a single available
+ // package. For system packages we look for all the available packages
+ // analogous to pkg-build. If none are found then we assume the
+ // --sys-no-stub option was used to configure this package and return an
+ // empty list. @@ What if it was configured with a specific bpkg version or
+ // `*`?
+ //
+ static available_packages
+ find_available_packages (const common_options& co,
+ database& db,
+ const shared_ptr<selected_package>& p)
+ {
+ // Only configured packages have a meaningful available counterpart.
+ //
+ assert (p->state == package_state::configured);
+
+ available_packages r;
+ if (p->substate == package_substate::system)
+ {
+ // System package: collect all the available versions, analogous to
+ // pkg-build. May legitimately come back empty (see the --sys-no-stub
+ // note above).
+ //
+ r = find_available_all (repo_configs, p->name);
+ }
+ else
+ {
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> ap (
+ find_available_fragment (co, db, p));
+
+ if (ap.second.loaded () && ap.second == nullptr)
+ {
+ // This is an orphan. We used to fail but there is no reason we cannot
+ // just load its manifest and make an available package out of that.
+ // And it's handy to be able to run this command on packages built
+ // from archives.
+ //
+ package_manifest m (
+ pkg_verify (co,
+ p->effective_src_root (db.config_orig),
+ true /* ignore_unknown */,
+ false /* ignore_toolchain */,
+ false /* load_buildfiles */,
+ // Copy potentially fixed up version from selected package.
+ [&p] (version& v) {v = p->version;}));
+
+ // Fake the buildfile information (not used).
+ //
+ m.alt_naming = false;
+ m.bootstrap_build = "project = " + p->name.string () + '\n';
+
+ ap.first = make_shared<available_package> (move (m));
+
+ // Fake the location (only used for diagnostics).
+ //
+ ap.second = make_shared<repository_fragment> (
+ repository_location (
+ p->effective_src_root (db.config).representation (),
+ repository_type::dir));
+
+ ap.first->locations.push_back (
+ package_location {ap.second, current_dir});
+ }
+
+ r.push_back (move (ap));
+ }
+
+ return r;
+ }
+
+ // Merge dependency languages for the (ultimate) dependent of the specified
+ // type.
+ //
+ static void
+ merge_languages (const string& type,
+ small_vector<language, 1>& langs,
+ const available_package& ap)
+ {
+ for (const language& l: ap.effective_languages ())
+ {
+ // Unless both the dependent and dependency types are libraries, the
+ // interface/implementation distinction does not apply.
+ //
+ bool lib (type == "lib" && ap.effective_type () == "lib");
+
+ auto i (find_if (langs.begin (), langs.end (),
+ [&l] (const language& x)
+ {
+ return x.name == l.name;
+ }));
+
+ if (i == langs.end ())
+ {
+ // If this is an implementation language for a dependency, then it is
+ // also an implementation language for a dependent. The converse,
+ // however, depends on whether this dependency is an interface or
+ // implementation of this dependent, which we do not know. So we have
+ // to assume it's interface.
+ //
+ langs.push_back (language {l.name, lib && l.impl});
+ }
+ else
+ {
+ // Already seen this language: it stays an implementation language
+ // only if it is one from every dependency's perspective.
+ //
+ i->impl = i->impl && (lib && l.impl); // Merge.
+ }
+ }
+ }
+
+ // Collect dependencies of the specified package, potentially recursively.
+ // System dependencies go to deps, non-system -- to pkgs, which could be the
+ // same as deps or NULL, depending on the desired semantics (see the call
+ // site for details). Find available packages for pkgs and deps and merge
+ // languages.
+ //
+ static void
+ collect_dependencies (const common_options& co,
+ database& db,
+ packages* pkgs,
+ packages& deps,
+ const string& type,
+ small_vector<language, 1>& langs,
+ const selected_package& p,
+ bool recursive)
+ {
+ for (const auto& pr: p.prerequisites)
+ {
+ const lazy_shared_ptr<selected_package>& ld (pr.first);
+
+ // We only consider dependencies from target configurations, similar
+ // to pkg-install.
+ //
+ database& pdb (ld.database ());
+ if (pdb.type == host_config_type || pdb.type == build2_config_type)
+ continue;
+
+ shared_ptr<selected_package> d (ld.load ());
+
+ // Packaging stuff that is spread over multiple configurations is just
+ // too hairy so we don't support it. Specifically, it becomes tricky to
+ // override build options since using a global override will also affect
+ // host/build2 configurations.
+ //
+ if (db != pdb)
+ fail << "dependency package " << *d << " belongs to different "
+ << "configuration " << pdb.config_orig;
+
+ // The selected package can only be configured if all its dependencies
+ // are configured.
+ //
+ assert (d->state == package_state::configured);
+
+ // System dependencies go to deps, non-system -- to pkgs (which may be
+ // NULL or alias deps; see the function's introductory comment).
+ //
+ bool sys (d->substate == package_substate::system);
+ packages* ps (sys ? &deps : pkgs);
+
+ // Skip duplicates.
+ //
+ if (ps == nullptr || find_if (ps->begin (), ps->end (),
+ [&d] (const package& p)
+ {
+ return p.selected == d;
+ }) == ps->end ())
+ {
+ const selected_package& p (*d); // Note: deliberately shadows the
+ // dependent parameter; from here on
+ // p refers to this dependency.
+
+ if (ps != nullptr || (recursive && !sys))
+ {
+ available_packages aps (find_available_packages (co, db, d));
+
+ // Load and merge languages.
+ //
+ if (recursive && !sys)
+ {
+ const shared_ptr<available_package>& ap (aps.front ().first);
+ db.load (*ap, ap->languages_section);
+ merge_languages (type, langs, *ap);
+ }
+
+ if (ps != nullptr)
+ {
+ // The out root is only needed for non-system packages that are
+ // to be bundled (see the generate() call for details).
+ //
+ dir_path out;
+ if (ps != &deps)
+ out = p.effective_out_root (db.config);
+
+ ps->push_back (package {move (d), move (aps), move (out)});
+ }
+ }
+
+ // Descend into this dependency's own prerequisites.
+ //
+ if (recursive && !sys)
+ collect_dependencies (co, db, pkgs, deps, type, langs, p, recursive);
+ }
+ }
+ }
+
+ int
+ pkg_bindist (const pkg_bindist_options& o, cli::scanner& args)
+ {
+ tracer trace ("pkg_bindist");
+
+ dir_path c (o.directory ());
+ l4 ([&]{trace << "configuration: " << c;});
+
+ // Verify options.
+ //
+ enum class recursive_mode {auto_, full, separate};
+
+ optional<recursive_mode> rec;
+ {
+ diag_record dr;
+
+ if (o.recursive_specified ())
+ {
+ const string& m (o.recursive ());
+
+ if (m == "auto") rec = recursive_mode::auto_;
+ else if (m == "full") rec = recursive_mode::full;
+ else if (m == "separate") rec = recursive_mode::separate;
+ else if (m != "none")
+ dr << fail << "unknown --recursive mode '" << m << "'";
+ }
+
+ if (o.private_ ())
+ {
+ if (!rec)
+ {
+ dr << fail << "--private specified without --recursive";
+ }
+ else if (*rec == recursive_mode::separate)
+ {
+ // NOTE(review): this branch fires when --recursive=separate *was*
+ // specified, so the message should presumably read "--private
+ // specified with --recursive=separate" -- confirm and fix the
+ // wording.
+ //
+ dr << fail << "--private specified without --recursive=separate";
+ }
+ }
+
+ if (!dr.empty ())
+ dr << info << "run 'bpkg help pkg-bindist' for more information";
+ }
+
+ if (o.structured_result_specified ())
+ {
+ if (o.no_result ())
+ fail << "both --structured-result and --no-result specified";
+
+ if (o.structured_result () != "json")
+ fail << "unknown --structured-result format '"
+ << o.structured_result () << "'";
+ }
+
+ // Sort arguments into package names and configuration variables.
+ //
+ vector<package_name> pns;
+ strings vars;
+ {
+ bool sep (false); // Seen `--`.
+
+ while (args.more ())
+ {
+ string a (args.next ());
+
+ // If we see the `--` separator, then we are done parsing variables
+ // (while they won't clash with package names, we may be given a
+ // directory path that contains `=`).
+ //
+ if (!sep && a == "--")
+ {
+ sep = true;
+ continue;
+ }
+
+ if (a.find ('=') != string::npos)
+ vars.push_back (move (trim (a)));
+ else
+ {
+ try
+ {
+ pns.push_back (package_name (move (a))); // Not moved on failure.
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid package name '" << a << "': " << e;
+ }
+ }
+ }
+
+ if (pns.empty ())
+ fail << "package name argument expected" <<
+ info << "run 'bpkg help pkg-bindist' for more information";
+ }
+
+ // Note that we shouldn't need to install anything or use sudo.
+ //
+ pair<unique_ptr<system_package_manager>, string> spm (
+ make_production_system_package_manager (o,
+ host_triplet,
+ o.distribution (),
+ o.architecture ()));
+ if (spm.first == nullptr)
+ {
+ fail << "no standard distribution package manager for this host "
+ << "or it is not yet supported" <<
+ info << "consider specifying alternative distribution package "
+ << "manager with --distribution" <<
+ info << "specify --distribution=archive to generate installation "
+ << "archive" <<
+ info << "consider specifying --os-release-* if unable to correctly "
+ << "auto-detect host operating system";
+ }
+
+ database db (c, trace, true /* pre_attach */);
+
+ // Similar to pkg-install we disallow generating packages from the
+ // host/build2 configurations.
+ //
+ if (db.type == host_config_type || db.type == build2_config_type)
+ {
+ fail << "unable to generate distribution package from " << db.type
+ << " configuration" <<
+ info << "use target configuration instead";
+ }
+
+ // Prepare for the find_available_*() calls.
+ //
+ repo_configs.push_back (db);
+
+ transaction t (db);
+
+ // We need to suppress duplicate dependencies for the recursive mode.
+ //
+ session ses;
+
+ // Generate one binary package.
+ //
+ using binary_file = system_package_manager::binary_file;
+ using binary_files = system_package_manager::binary_files;
+
+ struct result
+ {
+ binary_files bins;
+ packages deps;
+ shared_ptr<selected_package> pkg;
+ };
+
+ bool dependent_config (false);
+
+ // Generate binary package(s) from the specified selected packages (the
+ // command line arguments on the first call; a single dependency on
+ // subsequent calls in the separate recursive mode). Return the produced
+ // files, the collected dependencies, and the first package (used for
+ // identification and duplicate suppression).
+ //
+ auto generate = [&o, &vars,
+ rec, &spm,
+ &c, &db,
+ &dependent_config] (const vector<package_name>& pns,
+ bool first) -> result
+ {
+ // Resolve package names to selected packages and verify they are all
+ // configured. While at it collect their available packages and
+ // dependencies as well as figure out type and languages.
+ //
+ packages pkgs, deps;
+ string type;
+ small_vector<language, 1> langs;
+
+ for (const package_name& n: pns)
+ {
+ shared_ptr<selected_package> p (db.find<selected_package> (n));
+
+ if (p == nullptr)
+ fail << "package " << n << " does not exist in configuration " << c;
+
+ if (p->state != package_state::configured)
+ fail << "package " << n << " is " << p->state <<
+ info << "expected it to be configured";
+
+ if (p->substate == package_substate::system)
+ fail << "package " << n << " is configured as system";
+
+ // Make sure there are no dependent configuration variables. The
+ // rationale here is that we most likely don't want to generate a
+ // binary package in a configuration that is specific to some
+ // dependents.
+ //
+ for (const config_variable& v: p->config_variables)
+ {
+ switch (v.source)
+ {
+ case config_source::dependent:
+ {
+ if (!o.allow_dependent_config ())
+ {
+ // NOTE(review): "is imposed " followed by " by dependent
+ // package" renders a double space in the diagnostic -- drop
+ // one of the spaces.
+ //
+ fail << "configuration variable " << v.name << " is imposed "
+ << " by dependent package" <<
+ info << "specify it as user configuration to allow" <<
+ info << "or specify --allow-dependent-config";
+ }
+
+ dependent_config = true;
+ break;
+ }
+ case config_source::user:
+ case config_source::reflect:
+ break;
+ }
+
+ if (dependent_config)
+ break;
+ }
+
+ // Load the available package for type/languages as well as the
+ // mapping information.
+ //
+ available_packages aps (find_available_packages (o, db, p));
+ const shared_ptr<available_package>& ap (aps.front ().first);
+ db.load (*ap, ap->languages_section);
+
+ if (pkgs.empty ()) // First.
+ {
+ type = ap->effective_type ();
+ langs = ap->effective_languages ();
+ }
+ else
+ merge_languages (type, langs, *ap);
+
+ const selected_package& r (*p);
+ pkgs.push_back (
+ package {move (p), move (aps), r.effective_out_root (db.config)});
+
+ // If --recursive is not specified or specified with the separate mode
+ // then we want all the immediate (system and non-) dependencies in
+ // deps. Otherwise, if the recursive mode is full, then we want all
+ // the transitive non-system dependencies in pkgs. In both recursive
+ // modes we also want all the transitive system dependencies in deps.
+ //
+ // Note also that in the auto recursive mode it's possible that some
+ // of the system dependencies are not really needed. But there is no
+ // way for us to detect this and it's better to over- than
+ // under-specify.
+ //
+ collect_dependencies (
+ o,
+ db,
+ (!rec || *rec == recursive_mode::separate
+ ? &deps
+ : *rec == recursive_mode::full ? &pkgs : nullptr),
+ deps,
+ type,
+ langs,
+ r,
+ rec.has_value ());
+ }
+
+ // Load the package manifest (source of extra metadata). This should be
+ // always possible since the package is configured and is not system.
+ //
+ const shared_ptr<selected_package>& sp (pkgs.front ().selected);
+
+ package_manifest pm (
+ pkg_verify (o,
+ sp->effective_src_root (db.config_orig),
+ true /* ignore_unknown */,
+ false /* ignore_toolchain */,
+ false /* load_buildfiles */,
+ // Copy potentially fixed up version from selected package.
+ [&sp] (version& v) {v = sp->version;}));
+
+ optional<bool> recursive_full;
+ if (rec && *rec != recursive_mode::separate)
+ recursive_full = (*rec == recursive_mode::full);
+
+ // Note that we pass type from here in case one day we want to provide
+ // an option to specify/override it (along with languages). Note that
+ // there will probably be no way to override type for dependencies.
+ //
+ binary_files r (spm.first->generate (pkgs,
+ deps,
+ vars,
+ db.config,
+ pm,
+ type, langs,
+ recursive_full,
+ first));
+
+ return result {move (r), move (deps), move (pkgs.front ().selected)};
+ };
+
+ list<result> rs; // Note: list for reference stability.
+
+ // Generate packages for dependencies, recursively, suppressing
+ // duplicates. Note: recursive lambda.
+ //
+ auto generate_deps = [&generate, &rs] (const packages& deps,
+ const auto& generate_deps) -> void
+ {
+ for (const package& d: deps)
+ {
+ const shared_ptr<selected_package>& p (d.selected);
+
+ // Skip system dependencies.
+ //
+ if (p->substate == package_substate::system)
+ continue;
+
+ // Make sure we don't generate the same dependency multiple times.
+ //
+ if (find_if (rs.begin (), rs.end (),
+ [&p] (const result& r)
+ {
+ return r.pkg == p;
+ }) != rs.end ())
+ continue;
+
+ if (verb >= 1)
+ text << "generating package for dependency " << p->name;
+
+ rs.push_back (generate ({p->name}, false /* first */));
+ generate_deps (rs.back ().deps, generate_deps);
+ }
+ };
+
+ // Generate top-level package(s).
+ //
+ rs.push_back (generate (pns, true /* first */));
+
+ // Generate dependencies, if requested.
+ //
+ if (rec && rec == recursive_mode::separate)
+ generate_deps (rs.back ().deps, generate_deps);
+
+ t.commit ();
+
+ if (rs.front ().bins.empty ())
+ return 0; // Assume prepare-only mode or similar.
+
+ if (o.no_result ())
+ ;
+ else if (!o.structured_result_specified ())
+ {
+ // Human-readable result: one diagnostic record per generated package
+ // listing the produced files.
+ //
+ if (verb)
+ {
+ const string& d (o.distribution_specified ()
+ ? o.distribution ()
+ : spm.first->os_release.name_id);
+
+ for (auto b (rs.begin ()), i (b); i != rs.end (); ++i)
+ {
+ const selected_package& p (*i->pkg);
+
+ string ver (p.version.string (false /* ignore_revision */,
+ true /* ignore_iteration */));
+
+ diag_record dr (text);
+
+ dr << "generated " << d << " package for "
+ << (i != b ? "dependency " : "")
+ << p.name << '/' << ver << ':';
+
+ for (const binary_file& f: i->bins)
+ dr << "\n " << f.path;
+ }
+ }
+ }
+ else
+ {
+ // Structured (JSON) result on stdout (see the STRUCTURED RESULT
+ // section in the .cli file for the serialized struct layout).
+ //
+ json::stream_serializer s (cout);
+
+ // Serialize a member unless its value matches the default.
+ //
+ auto member = [&s] (const char* n, const string& v, const char* d = "")
+ {
+ if (v != d)
+ s.member (n, v);
+ };
+
+ auto package = [&s, &member] (const result& r)
+ {
+ const selected_package& p (*r.pkg);
+ const binary_files& bfs (r.bins);
+
+ string ver (p.version.string (false /* ignore_revision */,
+ true /* ignore_iteration */));
+
+ s.begin_object (); // package
+ {
+ member ("name", p.name.string ());
+ member ("version", ver);
+ member ("system_version", bfs.system_version);
+ s.member_begin_array ("files");
+ for (const binary_file& bf: bfs)
+ {
+ s.begin_object (); // file
+ {
+ member ("type", bf.type);
+ member ("path", bf.path.string ());
+ member ("system_name", bf.system_name);
+ }
+ s.end_object (); // file
+ };
+ s.end_array ();
+ }
+ s.end_object (); // package
+ };
+
+ s.begin_object (); // bindist_result
+ {
+ member ("distribution", spm.second);
+ member ("architecture", spm.first->arch);
+
+ s.member_begin_object ("os_release");
+ {
+ const auto& r (spm.first->os_release);
+
+ member ("name_id", r.name_id);
+
+ if (!r.like_ids.empty ())
+ {
+ s.member_begin_array ("like_ids");
+ for (const string& id: r.like_ids) s.value (id);
+ s.end_array ();
+ }
+
+ member ("version_id", r.version_id);
+ member ("variant_id", r.variant_id);
+
+ member ("name", r.name);
+ member ("version_codename", r.version_codename);
+ member ("variant", r.variant);
+ }
+ s.end_object (); // os_release
+
+ member ("recursive", o.recursive (), "none");
+ if (o.private_ ()) s.member ("private", true);
+ if (dependent_config) s.member ("dependent_config", true);
+
+ s.member_name ("package");
+ package (rs.front ());
+
+ if (rs.size () > 1)
+ {
+ s.member_begin_array ("dependencies");
+ for (auto i (rs.begin ()); ++i != rs.end (); ) package (*i);
+ s.end_array ();
+ }
+ }
+ s.end_object (); // bindist_result
+
+ cout << endl;
+ }
+
+ return 0;
+ }
+
+ // Merge the default options file values into the command line options,
+ // failing if a default options file specifies an option that is not
+ // allowed there (currently only --directory|-d).
+ //
+ pkg_bindist_options
+ merge_options (const default_options<pkg_bindist_options>& defs,
+ const pkg_bindist_options& cmd)
+ {
+ // NOTE: remember to update the documentation if changing anything here.
+
+ return merge_default_options (
+ defs,
+ cmd,
+ [] (const default_options_entry<pkg_bindist_options>& e,
+ const pkg_bindist_options&)
+ {
+ const pkg_bindist_options& o (e.options);
+
+ auto forbid = [&e] (const char* opt, bool specified)
+ {
+ if (specified)
+ fail (e.file) << opt << " in default options file";
+ };
+
+ forbid ("--directory|-d", o.directory_specified ());
+ });
+ }
+}
diff --git a/bpkg/pkg-bindist.hxx b/bpkg/pkg-bindist.hxx
new file mode 100644
index 0000000..3a756f8
--- /dev/null
+++ b/bpkg/pkg-bindist.hxx
@@ -0,0 +1,27 @@
+// file : bpkg/pkg-bindist.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_PKG_BINDIST_HXX
+#define BPKG_PKG_BINDIST_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/pkg-command.hxx>
+#include <bpkg/pkg-bindist-options.hxx>
+
+namespace bpkg
+{
+ // Implement the pkg-bindist command, returning its exit code.
+ //
+ // Note that for now it doesn't seem we need to bother with package-
+ // specific configuration variables so it's scanner instead of
+ // group_scanner.
+ //
+ int
+ pkg_bindist (const pkg_bindist_options&, cli::scanner&);
+
+ // Merge the default options files values with the command line options
+ // (see the corresponding definition for the options that are forbidden
+ // in the default options files).
+ //
+ pkg_bindist_options
+ merge_options (const default_options<pkg_bindist_options>&,
+ const pkg_bindist_options&);
+}
+
+#endif // BPKG_PKG_BINDIST_HXX
diff --git a/bpkg/pkg-build-collect.cxx b/bpkg/pkg-build-collect.cxx
new file mode 100644
index 0000000..352fa52
--- /dev/null
+++ b/bpkg/pkg-build-collect.cxx
@@ -0,0 +1,8379 @@
+// file : bpkg/pkg-build-collect.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/pkg-build-collect.hxx>
+
+#include <map>
+#include <set>
+#include <limits> // numeric_limits
+#include <iostream> // cout
+#include <functional> // ref()
+#include <forward_list>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/rep-mask.hxx>
+#include <bpkg/diagnostics.hxx>
+#include <bpkg/satisfaction.hxx>
+
+#include <bpkg/common-options.hxx>
+
+#include <bpkg/cfg-link.hxx>
+#include <bpkg/cfg-create.hxx>
+#include <bpkg/package-query.hxx>
+#include <bpkg/package-configuration.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ // build_package
+ //
+  const system_package_status* build_package::
+  system_status () const
+  {
+    // Only a system (non-drop) package build can have a status from the
+    // system package manager.
+    //
+    assert (action);
+
+    if (!system || *action == build_package::drop)
+      return nullptr;
+
+    const optional<system_repository>& rep (db.get ().system_repository);
+    assert (rep);
+
+    const system_package* p (rep->find (name ()));
+    return p != nullptr ? p->system_status : nullptr;
+  }
+
+  const system_package_status* build_package::
+  system_install () const
+  {
+    // The system package is to be installed if it is not or only partially
+    // installed yet.
+    --
+    const system_package_status* s (system_status ());
+
+    if (s != nullptr)
+    {
+      switch (s->status)
+      {
+      case system_package_status::partially_installed:
+      case system_package_status::not_installed:
+        return s;
+      default:
+        return nullptr;
+      }
+    }
+
+    return nullptr;
+  }
+
+  bool build_package::
+  user_selection () const
+  {
+    // A user-selected package is tagged as required by the "command line"
+    // pseudo-dependent in the main database.
+    //
+    package_version_key k (db.get ().main_database (), "command line");
+    return required_by.find (k) != required_by.end ();
+  }
+
+  bool build_package::
+  user_selection (const vector<build_package>& hold_pkgs) const
+  {
+    // Return true if this package (same configuration and name) is among
+    // the specified hold packages.
+    //
+    // Use any_of() instead of the find_if()/end() comparison for clarity.
+    //
+    return any_of (hold_pkgs.begin (), hold_pkgs.end (),
+                   [this] (const build_package& p)
+                   {
+                     return p.db == db && p.name () == name ();
+                   });
+  }
+
+  string build_package::
+  available_name_version_db () const
+  {
+    // Append the configuration string, unless it is empty (as is the case
+    // for the current configuration).
+    //
+    string r (available_name_version ());
+
+    const string& cfg (db.get ().string);
+    if (!cfg.empty ())
+    {
+      r += ' ';
+      r += cfg;
+    }
+
+    return r;
+  }
+
+ // Return true if the prerequisites of this configured source package
+ // need to be re-collected recursively: dependencies already (partially)
+ // collected, version change, pending re-collection (build_recollect),
+ // buildfile clauses in dependencies combined with a custom
+ // configuration, or a repointed dependent.
+ //
+ bool build_package::
+ recollect_recursively (const repointed_dependents& rpt_depts) const
+ {
+ assert (action &&
+ *action == build_package::build &&
+ available != nullptr &&
+ selected != nullptr &&
+ selected->state == package_state::configured &&
+ selected->substate != package_substate::system);
+
+ // Note that if the skeleton is present then the package is either being
+ // already collected or its configuration has been negotiated between the
+ // dependents.
+ //
+ return !system &&
+ (dependencies ||
+ selected->version != available_version () ||
+ (flags & build_recollect) != 0 ||
+ ((!config_vars.empty () || skeleton) &&
+ has_buildfile_clause (available->dependencies)) ||
+ rpt_depts.find (package_key (db, name ())) != rpt_depts.end ());
+ }
+
+  bool build_package::
+  recursive_collection_postponed () const
+  {
+    // The recursive collection is postponed if we started collecting the
+    // dependencies but not all of the available package's depends clauses
+    // have been processed yet.
+    //
+    assert (action && *action == build_package::build && available != nullptr);
+
+    if (!dependencies)
+      return false;
+
+    return dependencies->size () != available->dependencies.size ();
+  }
+
+ // Return true if the configured selected package needs to be
+ // reconfigured by this build/adjustment.
+ //
+ bool build_package::
+ reconfigure () const
+ {
+ assert (action && *action != drop);
+
+ // Reconfigure if explicitly requested (adjust_reconfigure flag) or if
+ // this build changes the package: system/source flip, version change,
+ // replacement, or, for a source package, custom configuration variables
+ // or disfigure.
+ //
+ return selected != nullptr &&
+ selected->state == package_state::configured &&
+ ((flags & adjust_reconfigure) != 0 ||
+ (*action == build &&
+ (selected->system () != system ||
+ selected->version != available_version () ||
+ replace () ||
+ (!system && (!config_vars.empty () || disfigure)))));
+ }
+
+  bool build_package::
+  configure_only () const
+  {
+    assert (action);
+
+    // Explicitly requested configure-only, or an implied one for a build
+    // flagged as a repoint or re-evaluation.
+    //
+    if (configure_only_)
+      return true;
+
+    return *action == build &&
+           (flags & (build_repoint | build_reevaluate)) != 0;
+  }
+
+  const version& build_package::
+  available_version () const
+  {
+    // This should have been diagnosed before creating build_package object.
+    //
+    if (system)
+    {
+      const version* v (available != nullptr
+                        ? available->system_version (db)
+                        : nullptr);
+      assert (v != nullptr);
+      return *v;
+    }
+
+    assert (available != nullptr && !available->stub ());
+    return available->version;
+  }
+
+ // Return true if the package is external (comes from a directory or a
+ // directory-based repository) and, if d is not NULL, set *d to its
+ // source directory (normalized where necessary).
+ //
+ bool build_package::
+ external (dir_path* d) const
+ {
+ assert (action);
+
+ if (*action == build_package::drop)
+ return false;
+
+ // If adjustment or orphan, then new and old are the same.
+ //
+ // Note that in the common case a package version doesn't come from too
+ // many repositories (8).
+ //
+ small_vector<reference_wrapper<const package_location>, 8> locations;
+
+ if (available != nullptr) // Not adjustment?
+ {
+ locations.reserve (available->locations.size ());
+
+ // Skip locations from masked repository fragments.
+ //
+ for (const package_location& pl: available->locations)
+ {
+ if (!rep_masked_fragment (pl.repository_fragment))
+ locations.push_back (pl);
+ }
+ }
+
+ if (locations.empty ())
+ {
+ assert (selected != nullptr);
+
+ if (selected->external ())
+ {
+ assert (selected->src_root);
+
+ if (d != nullptr)
+ *d = *selected->src_root;
+
+ return true;
+ }
+ }
+ else
+ {
+ const package_location& pl (locations[0]);
+
+ if (pl.repository_fragment.object_id () == "") // Special root?
+ {
+ if (!exists (pl.location)) // Directory case?
+ {
+ if (d != nullptr)
+ *d = normalize (path_cast<dir_path> (pl.location), "package");
+
+ return true;
+ }
+ }
+ else
+ {
+ // See if the package comes from the directory-based repository, and
+ // so is external.
+ //
+ // Note that such repository fragments are always preferred over
+ // others (see below).
+ //
+ for (const package_location& pl: locations)
+ {
+ const repository_location& rl (
+ pl.repository_fragment.load ()->location);
+
+ if (rl.directory_based ())
+ {
+ // Note that the repository location path is always absolute for
+ // the directory-based repositories but the package location may
+ // potentially not be normalized. Thus, we normalize the resulting
+ // path, if requested.
+ //
+ if (d != nullptr)
+ *d = normalize (path_cast<dir_path> (rl.path () / pl.location),
+ "package");
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // Merge the other build_package object p (same package and
+ // configuration) into this one: user-specified options/variables,
+ // required-by set, constraints, upgrade/deorphan/hold flags, and state
+ // flags.
+ //
+ void build_package::
+ merge (build_package&& p)
+ {
+ // We don't merge objects from different configurations.
+ //
+ assert (db == p.db);
+
+ // We don't merge into pre-entered objects, and from/into drops.
+ //
+ assert (action && *action != drop && (!p.action || *p.action != drop));
+
+ // We never merge two repointed dependent reconfigurations.
+ //
+ assert ((flags & build_repoint) == 0 || (p.flags & build_repoint) == 0);
+
+ // If true, then add the user-selection tag.
+ //
+ bool add_user_selection (false);
+
+ // Copy the user-specified options/variables.
+ //
+ if (p.user_selection ())
+ {
+ // We don't allow a package specified on the command line multiple times
+ // to have different sets of options/variables. Given that, it's
+ // tempting to assert that the options/variables don't change if we
+ // merge into a user selection. That's, however, not the case due to the
+ // iterative plan refinement implementation details (--checkout-*
+ // options and variables are only saved into the pre-entered
+ // dependencies, etc.).
+ //
+ // Note that configuration can only be specified for packages on the
+ // command line and such packages get collected/pre-entered early,
+ // before any prerequisites get collected. Thus, it doesn't seem
+ // possible that a package configuration/options may change after we
+ // have created the package skeleton.
+ //
+ // Also note that if it wouldn't be true, we would potentially need to
+ // re-collect the package prerequisites, since configuration change
+ // could affect the enable condition evaluation and, as a result, the
+ // dependency alternative choice.
+ //
+ assert (!skeleton ||
+ ((p.config_vars.empty () || p.config_vars == config_vars) &&
+ p.disfigure == disfigure));
+
+ if (p.keep_out)
+ keep_out = p.keep_out;
+
+ if (p.disfigure)
+ disfigure = p.disfigure;
+
+ if (p.configure_only_)
+ configure_only_ = p.configure_only_;
+
+ if (p.checkout_root)
+ checkout_root = move (p.checkout_root);
+
+ if (p.checkout_purge)
+ checkout_purge = p.checkout_purge;
+
+ if (!p.config_vars.empty ())
+ config_vars = move (p.config_vars);
+
+ // Propagate the user-selection tag.
+ //
+ add_user_selection = true;
+ }
+
+ // Merge in the required-by package names only if semantics matches.
+ // Otherwise, prefer the "required by dependents" semantics since we, in
+ // particular, should never replace such package builds in the map with
+ // package drops (see collect_drop() for details).
+ //
+ if (p.required_by_dependents == required_by_dependents)
+ {
+ required_by.insert (p.required_by.begin (), p.required_by.end ());
+ }
+ else if (p.required_by_dependents)
+ {
+ // Restore the user-selection tag.
+ //
+ if (user_selection ())
+ add_user_selection = true;
+
+ required_by_dependents = true;
+ required_by = move (p.required_by);
+ }
+
+ if (add_user_selection)
+ required_by.emplace (db.get ().main_database (), "command line");
+
+ // Copy constraints, suppressing duplicates.
+ //
+ if (!constraints.empty ())
+ {
+ for (constraint_type& c: p.constraints)
+ {
+ if (find_if (constraints.begin (), constraints.end (),
+ [&c] (const constraint_type& v)
+ {
+ return v.dependent == c.dependent && v.value == c.value;
+ }) == constraints.end ())
+ {
+ constraints.push_back (move (c));
+ }
+ }
+ }
+ else
+ constraints = move (p.constraints);
+
+ // Copy upgrade flag if "stronger" (existing wins over non-existing and
+ // upgrade wins over patch).
+ //
+ if (upgrade < p.upgrade)
+ upgrade = p.upgrade;
+
+ // Copy deorphan flag if greater.
+ //
+ if (p.deorphan)
+ deorphan = true;
+
+ // Copy hold_* flags if they are "stronger".
+ //
+ if (!hold_package || (p.hold_package && *p.hold_package > *hold_package))
+ hold_package = p.hold_package;
+
+ if (!hold_version || (p.hold_version && *p.hold_version > *hold_version))
+ hold_version = p.hold_version;
+
+ // Copy state flags and upgrade dependent repointments and re-evaluations
+ // to the full builds. But in contrast to the repointed dependents we may
+ // merge two dependent re-evaluations.
+ //
+ flags |= (p.flags & ~build_reevaluate);
+
+ if (*action == build)
+ {
+ flags &= ~build_repoint;
+
+ if ((p.flags & build_reevaluate) == 0)
+ flags &= ~build_reevaluate;
+ }
+
+ // Note that we don't copy the build_package::system flag. If it was
+ // set from the command line ("strong system") then we will also have
+ // the '==' constraint which means that this build_package object will
+ // never be replaced.
+ //
+ // For other cases ("weak system") we don't want to copy system over in
+ // order not prevent, for example, system to non-system upgrade.
+ }
+
+ // Initialize and return the package skeleton, potentially with the
+ // available package overridden and/or with the old configuration (user
+ // and/or dependent variables) loaded. Can only be called once (asserts
+ // that the skeleton is absent).
+ //
+ package_skeleton& build_package::
+ init_skeleton (const common_options& options,
+ bool load_old_dependent_config,
+ const shared_ptr<available_package>& override)
+ {
+ shared_ptr<available_package> ap (override != nullptr
+ ? override
+ : available);
+
+ assert (!skeleton && ap != nullptr);
+
+ package_key pk (db, ap->id.name);
+
+ if (system)
+ {
+ // Keep the available package if its version is "close enough" to the
+ // system package version. For now we will require the exact match
+ // but in the future we could relax this (e.g., allow the user to
+ // specify something like libfoo/^1.2.0 or some such).
+ //
+ const version* v (!ap->stub () ? ap->system_version (db) : nullptr);
+
+ if (v == nullptr || *v != ap->version)
+ ap = nullptr;
+ }
+
+ optional<dir_path> src_root;
+ optional<dir_path> out_root;
+
+ optional<dir_path> old_src_root;
+ optional<dir_path> old_out_root;
+ uint16_t load_config_flags (0);
+
+ if (ap != nullptr)
+ {
+ bool src_conf (selected != nullptr &&
+ selected->state == package_state::configured &&
+ selected->substate != package_substate::system);
+
+ database& pdb (db);
+
+ // If the package is being reconfigured, then specify {src,out}_root as
+ // the existing source and output root directories not to create the
+ // skeleton directory needlessly. Otherwise, if the being built package
+ // is external, then specify src_root as its existing source directory
+ // and out_root as its potentially non-existing output directory.
+ //
+ // Can we actually use the existing output root directory if the package
+ // is being reconfigured but we are requested to ignore the current
+ // configuration? Yes we can, since load_config_flags stays 0 in this
+ // case and all the variables in config.build will be ignored.
+ //
+ if (src_conf && ap->version == selected->version)
+ {
+ src_root = selected->effective_src_root (pdb.config);
+ out_root = selected->effective_out_root (pdb.config);
+ }
+ else
+ {
+ src_root = external_dir ();
+
+ if (src_root)
+ out_root = dir_path (pdb.config) /= name ().string ();
+ }
+
+ // Specify old_{src,out}_root paths and set load_config_flags if the old
+ // configuration is present and is requested to be loaded.
+ //
+ if (src_conf && (!disfigure || load_old_dependent_config))
+ {
+ old_src_root = selected->effective_src_root (pdb.config);
+ old_out_root = selected->effective_out_root (pdb.config);
+
+ if (!disfigure)
+ load_config_flags |= package_skeleton::load_config_user;
+
+ if (load_old_dependent_config)
+ load_config_flags |= package_skeleton::load_config_dependent;
+ }
+ }
+
+ skeleton = package_skeleton (
+ options,
+ move (pk),
+ system,
+ move (ap),
+ config_vars, // @@ Maybe make optional<strings> and move?
+ disfigure,
+ (selected != nullptr ? &selected->config_variables : nullptr),
+ move (src_root),
+ move (out_root),
+ move (old_src_root),
+ move (old_out_root),
+ load_config_flags);
+
+ return *skeleton;
+ }
+
+ // replaced_versions
+ //
+  void replaced_versions::
+  cancel_bogus (tracer& trace, bool scratch)
+  {
+    // Erase every version replacement which never took effect and, if
+    // requested, restart the collection from scratch when any were erased.
+    //
+    bool erased (false);
+
+    for (auto i (begin ()); i != end (); )
+    {
+      if (i->second.replaced)
+      {
+        ++i;
+        continue;
+      }
+
+      erased = true;
+
+      l5 ([&]{trace << "erase bogus version replacement " << i->first;});
+
+      i = erase (i);
+    }
+
+    if (erased && scratch)
+    {
+      l5 ([&]{trace << "bogus version replacement erased, throwing";});
+      throw cancel_replacement ();
+    }
+  }
+
+ // unsatisfied_dependents
+ //
+  // Add the constraint c of the dependent dpt on the dependency dep,
+  // which was ignored during the collection, together with the
+  // unsatisfied constraints and the dependency chain.
+  //
+  void unsatisfied_dependents::
+  add (const package_key& dpt,
+       const package_key& dep,
+       const version_constraint& c,
+       vector<unsatisfied_constraint>&& ucs,
+       vector<package_key>&& dc)
+  {
+    if (unsatisfied_dependent* ud = find_dependent (dpt))
+    {
+      vector<ignored_constraint>& ics (ud->ignored_constraints);
+
+      // Skip the dependency if it is already in the list.
+      //
+      // It feels that it may already be present in the list with a different
+      // constraint (think of multiple depends clauses with the same
+      // dependency), in which case we leave it unchanged.
+      //
+      // Note: capture dep by reference to avoid copying package_key.
+      //
+      if (find_if (ics.begin (), ics.end (),
+                   [&dep] (const auto& v) {return v.dependency == dep;}) ==
+          ics.end ())
+      {
+        ics.emplace_back (dep, c, move (ucs), move (dc));
+      }
+    }
+    else
+      push_back (
+        unsatisfied_dependent {
+          dpt, {ignored_constraint (dep, c, move (ucs), move (dc))}});
+  }
+
+  unsatisfied_dependent* unsatisfied_dependents::
+  find_dependent (const package_key& dk)
+  {
+    // Linear search (the list is expected to be small).
+    //
+    for (unsatisfied_dependent& d: *this)
+    {
+      if (dk == d.dependent)
+        return &d;
+    }
+
+    return nullptr;
+  }
+
+ // Issue the diagnostics for the first unsatisfied dependent/constraint
+ // and fail. Note that this function never returns (see endf at the end
+ // of each branch).
+ //
+ void unsatisfied_dependents::
+ diag (const build_packages& pkgs)
+ {
+ assert (!empty ());
+
+ const unsatisfied_dependent& dpt (front ());
+ const package_key& dk (dpt.dependent);
+
+ assert (!dpt.ignored_constraints.empty ());
+
+ const ignored_constraint& ic (dpt.ignored_constraints.front ());
+
+ const build_package* p (pkgs.entered_build (ic.dependency));
+ assert (p != nullptr); // The dependency must be collected.
+
+ const version_constraint& c (ic.constraint);
+ const vector<unsatisfied_constraint>& ucs (ic.unsatisfied_constraints);
+
+ const package_name& n (p->name ());
+
+ // " info: ..."
+ string indent (" ");
+
+ if (ucs.empty ()) // 'unable to up/downgrade package' failure.
+ {
+ database& pdb (p->db);
+ const shared_ptr<selected_package>& sp (p->selected);
+
+ // Otherwise, this would be a dependency adjustment (not an
+ // up/down-grade), and thus the dependent must be satisfied with the
+ // already configured dependency.
+ //
+ assert (p->available != nullptr);
+
+ const version& av (p->available_version ());
+
+ // See if we are upgrading or downgrading this package.
+ //
+ int ud (sp->version.compare (av));
+
+ // Otherwise, the dependent must be satisfied with the already
+ // configured dependency.
+ //
+ assert (ud != 0);
+
+ diag_record dr (fail);
+ dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
+ << "package " << *sp << pdb << " to ";
+
+ // Print both (old and new) package names in full if the system
+ // attribution changes.
+ //
+ if (p->system != sp->system ())
+ dr << p->available_name_version ();
+ else
+ dr << av; // Can't be the wildcard otherwise would satisfy.
+
+ shared_ptr<selected_package> dsp (
+ dk.db.get ().load<selected_package> (dk.name));
+
+ assert (dsp != nullptr); // By definition.
+
+ dr << info << "because configured package " << *dsp << dk.db
+ << " depends on (" << n << ' ' << c << ')';
+
+ // Print the dependency constraints tree for this unsatisfied dependent,
+ // which only contains constraints which come from its selected
+ // dependents, recursively.
+ //
+ {
+ set<package_key> printed;
+ pkgs.print_constraints (
+ dr,
+ dk,
+ indent,
+ printed,
+ (verb >= 2 ? optional<bool> () : true) /* selected_dependent */);
+ }
+
+ // If the dependency we failed to up/downgrade is not explicitly
+ // specified on the command line, then print its dependency constraints
+ // tree which only contains constraints which come from its being built
+ // dependents, recursively.
+ //
+ if (!p->user_selection ())
+ {
+ // The dependency upgrade is always required by someone, the command
+ // line or a package.
+ //
+ assert (!p->required_by.empty ());
+
+ dr << info << "package " << p->available_name_version_db ();
+
+ // Note that if the required_by member contains the dependencies,
+ // rather than the dependents, we will subsequently print the
+ // dependency constraints trees for these dependencies rather than a
+ // single constraints tree (rooted in the dependency we failed to
+ // up/downgrade). Also note that in this case we will still reuse the
+ // same printed packages cache for all print_constraints() calls,
+ // since it will likely be considered as a single dependency graph by
+ // the user.
+ //
+ bool rbd (p->required_by_dependents);
+ dr << (rbd ? " required by" : " dependent of");
+
+ set<package_key> printed;
+ for (const package_version_key& pvk: p->required_by)
+ {
+ dr << '\n' << indent << pvk;
+
+ // For a dependent, also print its constraint on the dependency,
+ // if present.
+ //
+ if (rbd)
+ {
+ const vector<build_package::constraint_type>& cs (p->constraints);
+ auto i (find_if (cs.begin (), cs.end (),
+ [&pvk] (const build_package::constraint_type& v)
+ {
+ return v.dependent == pvk;
+ }));
+
+ if (i != cs.end ())
+ dr << " (" << p->name () << ' ' << i->value << ')';
+ }
+
+ indent += " ";
+ pkgs.print_constraints (
+ dr,
+ package_key (pvk.db, pvk.name),
+ indent,
+ printed,
+ (verb >= 2 ? optional<bool> () : false) /* selected_dependent */);
+
+ indent.resize (indent.size () - 2);
+ }
+ }
+
+ if (verb < 2)
+ dr << info << "re-run with -v for additional dependency information";
+
+ dr << info << "consider re-trying with --upgrade|-u potentially combined "
+ << "with --recursive|-r" <<
+ info << "or explicitly request up/downgrade of package " << dk.name <<
+ info << "or explicitly specify package " << n << " version to "
+ << "manually satisfy these constraints" << endf;
+ }
+ else // 'unable to satisfy constraints' failure.
+ {
+ diag_record dr (fail);
+ dr << "unable to satisfy constraints on package " << n;
+
+ for (const unsatisfied_constraint& uc: ucs)
+ {
+ const build_package::constraint_type& c (uc.constraint);
+
+ dr << info << c.dependent << " depends on (" << n << ' ' << c.value
+ << ')';
+
+ if (const build_package* d = pkgs.dependent_build (c))
+ {
+ set<package_key> printed;
+ pkgs.print_constraints (dr, *d, indent, printed);
+ }
+ }
+
+ for (const unsatisfied_constraint& uc: ucs)
+ {
+ dr << info << "available "
+ << package_string (n, uc.available_version, uc.available_system);
+ }
+
+ for (const package_key& d: reverse_iterate (ic.dependency_chain))
+ {
+ const build_package* p (pkgs.entered_build (d));
+ assert (p != nullptr);
+
+ dr << info << "while satisfying " << p->available_name_version_db ();
+ }
+
+ dr << info << "explicitly specify " << n << " version to "
+ << "manually satisfy both constraints" << endf;
+ }
+ }
+
+ // postponed_configuration
+ //
+  postponed_configuration::dependency*
+  postponed_configuration::dependent_info::
+  find_dependency (pair<size_t, size_t> pos)
+  {
+    // Search for the dependency at the specified depends-clause position.
+    //
+    for (dependency& d: dependencies)
+    {
+      if (d.position == pos)
+        return &d;
+    }
+
+    return nullptr;
+  }
+
+ // Add the dependency to this dependent, merging its packages and the
+ // has_alternative flag into an existing dependency at the same
+ // depends-clause position, if present.
+ //
+ void postponed_configuration::dependent_info::
+ add (dependency&& dep)
+ {
+ if (dependency* d = find_dependency (dep.position))
+ {
+ for (package_key& p: dep)
+ {
+ // Add the dependency unless it's already there.
+ //
+ if (find (d->begin (), d->end (), p) == d->end ())
+ {
+ // Feels like we can accumulate new dependencies into an existing
+ // position only for an existing dependent. Note that we could still
+ // try to add an (supposedly) identical entry for a non-existent
+ // dependent (via some murky paths). Feels like this should be
+ // harmless.
+ //
+ assert (existing);
+
+ d->push_back (move (p));
+ }
+ }
+
+ // Set the has_alternative flag for an existing dependent. Note that
+ // it shouldn't change if already set.
+ //
+ if (dep.has_alternative)
+ {
+ if (!d->has_alternative)
+ {
+ assert (existing); // As above.
+ d->has_alternative = *dep.has_alternative;
+ }
+ else
+ assert (*d->has_alternative == *dep.has_alternative);
+ }
+ }
+ else
+ dependencies.push_back (move (dep));
+ }
+
+ // Add a dependent at the specified depends-clause position together
+ // with its dependency packages and alternative flag to this cluster.
+ //
+ void postponed_configuration::
+ add (package_key&& dependent,
+ bool existing,
+ pair<size_t, size_t> position,
+ packages&& deps,
+ optional<bool> has_alternative)
+ {
+ assert (position.first != 0 && position.second != 0);
+
+ add_dependencies (deps); // Don't move from since will be used later.
+
+ auto i (dependents.find (dependent));
+
+ if (i != dependents.end ())
+ {
+ dependent_info& ddi (i->second);
+
+ ddi.add (dependency (position, move (deps), has_alternative));
+
+ // Conceptually, on the first glance, we can only move from existing to
+ // non-existing (e.g., due to a upgrade/downgrade later) and that case
+ // is handled via the version replacement rollback. However, after
+ // re-evaluation the existing dependent is handled similar to the new
+ // dependent and we can potentially up-negotiate the dependency
+ // configuration for it.
+ //
+ assert (ddi.existing || !existing);
+ }
+ else
+ {
+ small_vector<dependency, 1> ds ({
+ dependency (position, move (deps), has_alternative)});
+
+ dependents.emplace (move (dependent),
+ dependent_info {existing, move (ds)});
+ }
+ }
+
+  bool postponed_configuration::
+  contains_dependency (const packages& ds) const
+  {
+    // Return true if any of the specified packages is a dependency in this
+    // cluster.
+    //
+    // Use any_of() instead of a manual loop for clarity.
+    //
+    return any_of (ds.begin (), ds.end (),
+                   [this] (const package_key& d)
+                   {
+                     return contains_dependency (d);
+                   });
+  }
+
+  bool postponed_configuration::
+  contains_dependency (const postponed_configuration& c) const
+  {
+    // Return true if the two clusters share any dependency package.
+    //
+    // Use any_of() instead of a manual loop for clarity.
+    //
+    return any_of (c.dependencies.begin (), c.dependencies.end (),
+                   [this] (const package_key& d)
+                   {
+                     return contains_dependency (d);
+                   });
+  }
+
+ // Merge the other cluster into this one: record its id(s) as merged,
+ // combine the dependents and dependencies, and keep the minimum
+ // non-zero negotiation depth of the two.
+ //
+ void postponed_configuration::
+ merge (postponed_configuration&& c)
+ {
+ assert (c.id != id); // Can't merge to itself.
+
+ merged_ids.push_back (c.id);
+
+ for (size_t mid: c.merged_ids)
+ merged_ids.push_back (mid);
+
+ // Merge dependents.
+ //
+ for (auto& d: c.dependents)
+ {
+ auto i (dependents.find (d.first));
+
+ if (i != dependents.end ())
+ {
+ dependent_info& ddi (i->second); // Destination dependent info.
+ dependent_info& sdi (d.second); // Source dependent info.
+
+ for (dependency& sd: sdi.dependencies)
+ ddi.add (move (sd));
+ }
+ else
+ dependents.emplace (d.first, move (d.second));
+ }
+
+ // Merge dependencies.
+ //
+ add_dependencies (move (c.dependencies));
+
+ // Pick the depth of the outermost negotiated configuration (minimum
+ // non-zero depth) between the two.
+ //
+ if (depth != 0)
+ {
+ if (c.depth != 0 && depth > c.depth)
+ depth = c.depth;
+ }
+ else
+ depth = c.depth;
+ }
+
+  void postponed_configuration::
+  set_shadow_cluster (postponed_configuration&& c)
+  {
+    // Rebuild the shadow cluster from the dependents of the specified
+    // cluster, recording only their depends-clause positions.
+    //
+    shadow_cluster.clear ();
+
+    for (auto& d: c.dependents)
+    {
+      positions ps;
+
+      for (auto& dp: d.second.dependencies)
+        ps.push_back (dp.position);
+
+      shadow_cluster.emplace (d.first, move (ps));
+    }
+  }
+
+  bool postponed_configuration::
+  is_shadow_cluster (const postponed_configuration& c)
+  {
+    // The shadow cluster matches if it contains exactly the same
+    // dependents with exactly the same depends-clause positions.
+    //
+    if (shadow_cluster.size () != c.dependents.size ())
+      return false;
+
+    for (const auto& dt: c.dependents)
+    {
+      auto i (shadow_cluster.find (dt.first));
+
+      if (i == shadow_cluster.end ())
+        return false;
+
+      const positions& ps (i->second);
+      const auto& ds (dt.second.dependencies);
+
+      if (ps.size () != ds.size ())
+        return false;
+
+      for (const auto& d: ds)
+      {
+        if (find (ps.begin (), ps.end (), d.position) == ps.end ())
+          return false;
+      }
+    }
+
+    return true;
+  }
+
+  bool postponed_configuration::
+  contains_in_shadow_cluster (package_key dependent,
+                              pair<size_t, size_t> pos) const
+  {
+    // Return true if the shadow cluster contains this dependent at this
+    // depends-clause position.
+    //
+    auto i (shadow_cluster.find (dependent));
+
+    if (i == shadow_cluster.end ())
+      return false;
+
+    const positions& ps (i->second);
+    return find (ps.begin (), ps.end (), pos) != ps.end ();
+  }
+
+ // Serialize the cluster in the '{dependent... | dependency->{dependent/
+ // pos...}...}' form, with '^' marking existing dependents and a
+ // trailing '!' (negotiated) or '?' (being negotiated) when applicable.
+ //
+ std::string postponed_configuration::
+ string () const
+ {
+ std::string r;
+
+ for (const auto& d: dependents)
+ {
+ r += r.empty () ? '{' : ' ';
+ r += d.first.string ();
+
+ if (d.second.existing)
+ r += '^';
+ }
+
+ if (r.empty ())
+ r += '{';
+
+ r += " |";
+
+ for (const package_key& d: dependencies)
+ {
+ r += ' ';
+ r += d.string ();
+ r += "->{";
+
+ // Print the dependents which list this package as a dependency
+ // together with the corresponding depends-clause positions.
+ //
+ bool first (true);
+ for (const auto& dt: dependents)
+ {
+ for (const dependency& dp: dt.second.dependencies)
+ {
+ if (find (dp.begin (), dp.end (), d) != dp.end ())
+ {
+ if (!first)
+ r += ' ';
+ else
+ first = false;
+
+ r += dt.first.string ();
+ r += '/';
+ r += to_string (dp.position.first);
+ r += ',';
+ r += to_string (dp.position.second);
+ }
+ }
+ }
+
+ r += '}';
+ }
+
+ r += '}';
+
+ if (negotiated)
+ r += *negotiated ? '!' : '?';
+
+ return r;
+ }
+
+  void postponed_configuration::
+  add_dependencies (packages&& deps)
+  {
+    // Append the dependencies, suppressing duplicates.
+    //
+    for (package_key& d: deps)
+    {
+      auto e (dependencies.end ());
+
+      if (find (dependencies.begin (), e, d) == e)
+        dependencies.push_back (move (d));
+    }
+  }
+
+  void postponed_configuration::
+  add_dependencies (const packages& deps)
+  {
+    // Append a copy of the dependencies, suppressing duplicates.
+    //
+    for (const package_key& d: deps)
+    {
+      auto e (dependencies.end ());
+
+      if (find (dependencies.begin (), e, d) == e)
+        dependencies.push_back (d);
+    }
+  }
+
+ // Add the dependent/dependencies to an appropriate cluster, creating a
+ // new one if necessary, and merge in any other dependency-intersecting
+ // clusters. Return the resulting cluster and, unless the shadow
+ // cluster-based logic applied (in which case nullopt is returned),
+ // false if any of the merged in clusters is non-negotiated or is being
+ // negotiated.
+ //
+ pair<postponed_configuration&, optional<bool>> postponed_configurations::
+ add (package_key dependent,
+ bool existing,
+ pair<size_t, size_t> position,
+ postponed_configuration::packages dependencies,
+ optional<bool> has_alternative)
+ {
+ tracer trace ("postponed_configurations::add");
+
+ assert (!dependencies.empty ());
+
+ // The plan is to first go through the existing clusters and check if any
+ // of them contain this dependent/dependencies in their shadow
+ // clusters. If such a cluster is found, then force-add them to
+ // it. Otherwise, if any dependency-intersecting clusters are present,
+ // then add the specified dependent/dependencies to the one with the
+ // minimum non-zero depth, if any, and to the first one otherwise.
+ // Otherwise, add the new cluster. Afterwards, merge into the resulting
+ // cluster other dependency-intersecting clusters. Note that in case of
+ // shadow, this should normally not happen because such a cluster should
+ // have been either pre-merged or its dependents should be in the
+ // cluster. But it feels like it may still happen if things change, in
+ // which case we will throw again (admittedly a bit fuzzy).
+ //
+ iterator ri;
+ bool rb (true);
+
+ // Note that if a single dependency is added, then it can only belong to a
+ // single existing cluster and so no clusters merge can happen, unless we
+ // are force-adding. In the later case we can only merge once for a single
+ // dependency.
+ //
+ // Let's optimize for the common case based on these facts.
+ //
+ bool single (dependencies.size () == 1);
+
+ // Merge dependency-intersecting clusters in the specified range into the
+ // resulting cluster and reset change rb to false if any of the merged in
+ // clusters is non-negotiated or is being negotiated.
+ //
+ // The iterator arguments refer to entries before and after the range
+ // endpoints, respectively.
+ //
+ auto merge = [&trace, &ri, &rb, single, this] (iterator i,
+ iterator e,
+ bool shadow_based)
+ {
+ postponed_configuration& rc (*ri);
+
+ iterator j (i);
+
+ // Merge the intersecting configurations.
+ //
+ bool merged (false);
+ for (++i; i != e; ++i)
+ {
+ postponed_configuration& c (*i);
+
+ if (c.contains_dependency (rc))
+ {
+ if (!c.negotiated || !*c.negotiated)
+ rb = false;
+
+ l5 ([&]{trace << "merge " << c << " into " << rc;});
+
+ assert (!shadow_based || (c.negotiated && *c.negotiated));
+
+ rc.merge (move (c));
+ c.dependencies.clear (); // Mark as merged from (see above).
+
+ merged = true;
+
+ if (single)
+ break;
+ }
+ }
+
+ // Erase configurations which we have merged from.
+ //
+ if (merged)
+ {
+ i = j;
+
+ for (++i; i != e; )
+ {
+ if (!i->dependencies.empty ())
+ {
+ ++i;
+ ++j;
+ }
+ else
+ i = erase_after (j);
+ }
+ }
+ };
+
+ // Trace the add operation, also indicating whether the shadow cluster-
+ // based logic applied.
+ //
+ auto trace_add = [&trace, &dependent, existing, position, &dependencies]
+ (const postponed_configuration& c, bool shadow)
+ {
+ if (verb >= 5)
+ {
+ diag_record dr (trace);
+ dr << "add {" << dependent;
+
+ if (existing)
+ dr << '^';
+
+ dr << ' ' << position.first << ',' << position.second << ':';
+
+ for (const auto& d: dependencies)
+ dr << ' ' << d;
+
+ dr << "} to " << c;
+
+ if (shadow)
+ dr << " (shadow cluster-based)";
+ }
+ };
+
+ // Try to add based on the shadow cluster.
+ //
+ {
+ auto i (begin ());
+ for (; i != end (); ++i)
+ {
+ postponed_configuration& c (*i);
+
+ if (c.contains_in_shadow_cluster (dependent, position))
+ {
+ trace_add (c, true /* shadow */);
+
+ c.add (move (dependent),
+ existing,
+ position,
+ move (dependencies),
+ has_alternative);
+
+ break;
+ }
+ }
+
+ if (i != end ())
+ {
+ // Note that the cluster with a shadow cluster is by definition
+ // either being negotiated or has been negotiated. Actually, there
+ // is also a special case when we didn't negotiate the configuration
+ // yet and are in the process of re-evaluating existing dependents.
+ // Note though, that in this case we have already got the try/catch
+ // frame corresponding to the cluster negotiation (see
+ // collect_build_postponed() for details).
+ //
+ assert (i->depth != 0);
+
+ ri = i;
+
+ merge (before_begin (), ri, true /* shadow_based */);
+ merge (ri, end (), true /* shadow_based */);
+
+ return make_pair (ref (*ri), optional<bool> ());
+ }
+ }
+
+ // Find the cluster to add the dependent/dependencies to.
+ //
+ optional<size_t> depth;
+
+ auto j (before_begin ()); // Precedes iterator i.
+ for (auto i (begin ()); i != end (); ++i, ++j)
+ {
+ postponed_configuration& c (*i);
+
+ if (c.contains_dependency (dependencies) &&
+ (!depth || (c.depth != 0 && (*depth == 0 || *depth > c.depth))))
+ {
+ ri = i;
+ depth = c.depth;
+ }
+ }
+
+ if (!depth) // No intersecting cluster?
+ {
+ // New cluster. Insert after the last element.
+ //
+ ri = insert_after (j,
+ postponed_configuration (
+ next_id_++,
+ move (dependent),
+ existing,
+ position,
+ move (dependencies),
+ has_alternative));
+
+ l5 ([&]{trace << "create " << *ri;});
+ }
+ else
+ {
+ // Add the dependent/dependencies into an existing cluster.
+ //
+ postponed_configuration& c (*ri);
+
+ trace_add (c, false /* shadow */);
+
+ c.add (move (dependent),
+ existing,
+ position,
+ move (dependencies),
+ has_alternative);
+
+ // Try to merge other clusters into this cluster.
+ //
+ merge (before_begin (), ri, false /* shadow_based */);
+ merge (ri, end (), false /* shadow_based */);
+ }
+
+ return make_pair (ref (*ri), optional<bool> (rb));
+ }
+
// Create a new cluster consisting of just the specified dependent (at the
// given 1-based depends-clause position) and its single dependency.
//
void postponed_configurations::
add (package_key dependent,
     pair<size_t, size_t> position,
     package_key dependency)
{
  tracer trace ("postponed_configurations::add");

  // Add the new cluster to the end of the list which we can only find by
  // traversing the list. While at it, make sure that the dependency doesn't
  // belong to any existing cluster.
  //
  auto i (before_begin ()); // Insert after this element.

  for (auto j (begin ()); j != end (); ++i, ++j)
    assert (!j->contains_dependency (dependency));

  i = insert_after (i,
                    postponed_configuration (next_id_++,
                                             move (dependent),
                                             position,
                                             move (dependency)));

  l5 ([&]{trace << "create " << *i;});
}
+
+ postponed_configuration* postponed_configurations::
+ find (size_t id)
+ {
+ for (postponed_configuration& cfg: *this)
+ {
+ if (cfg.id == id)
+ return &cfg;
+ }
+
+ return nullptr;
+ }
+
+ const postponed_configuration* postponed_configurations::
+ find_dependency (const package_key& d) const
+ {
+ for (const postponed_configuration& cfg: *this)
+ {
+ if (cfg.contains_dependency (d))
+ return &cfg;
+ }
+
+ return nullptr;
+ }
+
+ bool postponed_configurations::
+ negotiated () const
+ {
+ for (const postponed_configuration& cfg: *this)
+ {
+ if (!cfg.negotiated || !*cfg.negotiated)
+ return false;
+ }
+
+ return true;
+ }
+
+ postponed_configuration& postponed_configurations::
+ operator[] (size_t index)
+ {
+ auto i (begin ());
+ for (size_t j (0); j != index; ++j, ++i) assert (i != end ());
+
+ assert (i != end ());
+ return *i;
+ }
+
+ size_t postponed_configurations::
+ size () const
+ {
+ size_t r (0);
+ for (auto i (begin ()); i != end (); ++i, ++r) ;
+ return r;
+ }
+
+ // build_packages
+ //
+ bool build_packages::package_ref::
+ operator== (const package_ref& v)
+ {
+ return name == v.name && db == v.db;
+ }
+
build_packages::
build_packages (const build_packages& v)
    : build_package_list ()
{
  // Copy the map.
  //
  // Note that the copied entries' list positions are temporarily set to
  // end() and are fixed up while copying the list below.
  //
  for (const auto& p: v.map_)
    map_.emplace (p.first, data_type {end (), p.second.package});

  // Copy the list.
  //
  // Preserve the source list's order, re-pointing each map entry's position
  // at the corresponding element of our own list.
  //
  for (const auto& p: v)
  {
    auto i (map_.find (p.get ().db, p.get ().name ()));
    assert (i != map_.end ());
    i->second.position = insert (end (), i->second.package);
  }
}
+
build_packages& build_packages::
operator= (build_packages&& v) noexcept (false)
{
  clear ();

  // Move the map.
  //
  // Similar to what we do in the copy-constructor, but here we also need to
  // restore the database reference and the package shared pointers in the
  // source entry after the move. This way we can obtain the source packages
  // databases and names later while copying the list.
  //
  for (auto& p: v.map_)
  {
    build_package& bp (p.second.package);

    // Save the bits we must put back into the moved-from object.
    //
    database& db (bp.db);
    shared_ptr<selected_package> sp (bp.selected);
    shared_ptr<available_package> ap (bp.available);

    map_.emplace (p.first, data_type {end (), move (bp)});

    // Restore them so that iterating v's list below can still key into our
    // map by database/name.
    //
    bp.db = db;
    bp.selected = move (sp);
    bp.available = move (ap);
  }

  // Copy the list.
  //
  // As in the copy-constructor, fix up the end() placeholder positions to
  // refer to the newly inserted list elements, in the source list's order.
  //
  for (const auto& p: v)
  {
    auto i (map_.find (p.get ().db, p.get ().name ()));
    assert (i != map_.end ());
    i->second.position = insert (end (), i->second.package);
  }

  return *this;
}
+
// Return the map entry of the dependent package which imposed the specified
// constraint or NULL if the dependent is not a real package (signalled by
// the absence of its version; presumably the command line, etc).
//
const build_package* build_packages::
dependent_build (const build_package::constraint_type& c) const
{
  const build_package* r (nullptr);

  if (c.dependent.version)
  try
  {
    r = entered_build (c.dependent.db, c.dependent.name);
    assert (r != nullptr); // Expected to be collected.
  }
  catch (const invalid_argument&)
  {
    // Must be a package name since the version is specified.
    //
    assert (false);
  }

  return r;
}
+
// Pre-enter the package into the map. Only packages that are not yet being
// acted upon (no action) and have no associated repository fragment can be
// entered this way.
//
void build_packages::
enter (package_name name, build_package pkg)
{
  assert (!pkg.action && pkg.repository_fragment == nullptr);

  database& db (pkg.db); // Save before the move() call.
  auto p (map_.emplace (package_key {db, move (name)},
                        data_type {end (), move (pkg)}));

  assert (p.second); // The package must not be in the map yet.
}
+
// Enter the package build into the map or, if an entry for this package
// already exists, merge the two builds, picking the preferred version (see
// the preference rules below). If requested (fdb != NULL), also collect the
// package's prerequisites recursively. Return a pointer to the resulting
// map entry or NULL if the build was replaced with a drop or the existing
// entry was kept as is.
//
build_package* build_packages::
collect_build (const pkg_build_options& options,
               build_package pkg,
               replaced_versions& replaced_vers,
               postponed_configurations& postponed_cfgs,
               unsatisfied_dependents& unsatisfied_depts,
               build_package_refs* dep_chain,
               const function<find_database_function>& fdb,
               const function<add_priv_cfg_function>& apc,
               const repointed_dependents* rpt_depts,
               postponed_packages* postponed_repo,
               postponed_packages* postponed_alts,
               postponed_packages* postponed_recs,
               postponed_existing_dependencies* postponed_edeps,
               postponed_dependencies* postponed_deps,
               unacceptable_alternatives* unacceptable_alts,
               const function<verify_package_build_function>& vpb)
{
  using std::swap; // ...and not list::swap().

  using constraint_type = build_package::constraint_type;

  tracer trace ("collect_build");

  assert (pkg.repository_fragment == nullptr ||
          !rep_masked_fragment (pkg.repository_fragment));

  // See the above notes. Recursive collection mode is signalled by the
  // presence of the find-database callback.
  //
  bool recursive (fdb != nullptr);

  assert ((!recursive || dep_chain != nullptr) &&
          (rpt_depts != nullptr) == recursive &&
          (postponed_repo != nullptr) == recursive &&
          (postponed_alts != nullptr) == recursive &&
          (postponed_recs != nullptr) == recursive &&
          (postponed_edeps != nullptr) == recursive &&
          (postponed_deps != nullptr) == recursive &&
          (unacceptable_alts != nullptr) == recursive);

  // Only builds are allowed here.
  //
  assert (pkg.action && *pkg.action == build_package::build &&
          pkg.available != nullptr);

  package_key pk (pkg.db, pkg.available->id.name);

  // Apply the version replacement, if requested, and indicate that it was
  // applied. Ignore the replacement if its version doesn't satisfy the
  // dependency constraints specified by the caller. Also ignore if this is
  // a drop and the required-by package names of the specified build package
  // object have the "required by dependents" semantics.
  //
  auto vi (replaced_vers.find (pk));

  if (vi != replaced_vers.end () && !vi->second.replaced)
  {
    l5 ([&]{trace << "apply version replacement for "
                  << pkg.available_name_version_db ();});

    replaced_version& v (vi->second);

    if (v.available != nullptr)
    {
      const version& rv (v.system
                         ? *v.available->system_version (pk.db)
                         : v.available->version);

      bool replace (true);
      for (const constraint_type& c: pkg.constraints)
      {
        if (!satisfies (rv, c.value))
        {
          replace = false;

          l5 ([&]{trace << "replacement to " << rv << " is denied since "
                        << c.dependent << " depends on (" << pk.name << ' '
                        << c.value << ')';});
        }
      }

      if (replace)
      {
        v.replaced = true;

        pkg.available = v.available;
        pkg.repository_fragment = v.repository_fragment;
        pkg.system = v.system;

        l5 ([&]{trace << "replacement: "
                      << pkg.available_name_version_db ();});
      }
    }
    else
    {
      if (!pkg.required_by_dependents)
      {
        v.replaced = true;

        l5 ([&]{trace << "replacement: drop";});

        // We shouldn't be replacing a package build with the drop if someone
        // depends on this package.
        //
        assert (pkg.selected != nullptr);

        collect_drop (options, pkg.db, pkg.selected, replaced_vers);
        return nullptr;
      }
      else
      {
        assert (!pkg.required_by.empty ());

        l5 ([&]
            {
              diag_record dr (trace);
              dr << "replacement to drop is denied since " << pk
                 << " is required by ";
              for (auto b (pkg.required_by.begin ()), i (b);
                   i != pkg.required_by.end ();
                   ++i)
                dr << (i != b ? ", " : "") << *i;
            });
      }
    }
  }

  // Add the version replacement entry, call the verification function if
  // specified, and throw replace_version.
  //
  // Note that this package can potentially be present in the unsatisfied
  // dependents list on the dependency side with the replacement version
  // being unsatisfactory for the ignored constraint. In this case, during
  // the from-scratch re-collection this replacement will be ignored if/when
  // this package is collected with this constraint specified. But it can
  // still be applied for some later collect_build() call or potentially
  // turn out bogus.
  //
  auto replace_ver = [&pk, &vpb, &vi, &replaced_vers] (const build_package& p)
  {
    replaced_version rv (p.available, p.repository_fragment, p.system);

    if (vi != replaced_vers.end ())
      vi->second = move (rv);
    else
      replaced_vers.emplace (move (pk), move (rv));

    if (vpb)
      vpb (p, true /* scratch */);

    throw replace_version ();
  };

  auto i (map_.find (pk));

  // If we already have an entry for this package name, then we have to pick
  // one over the other.
  //
  // If the existing entry is a drop, then we override it. If the existing
  // entry is a pre-entered or is non-build one, then we merge it into the
  // new build entry. Otherwise (both are builds), we pick one and merge the
  // other into it.
  //
  if (i != map_.end ())
  {
    build_package& bp (i->second.package);

    // Note that we used to think that the scenario when the build could
    // replace drop could never happen since we would start collecting from
    // scratch. This has changed when we introduced replaced_versions for
    // collecting drops.
    //
    if (bp.action && *bp.action == build_package::drop)        // Drop.
    {
      bp = move (pkg);
    }
    else if (!bp.action || *bp.action != build_package::build) // Non-build.
    {
      pkg.merge (move (bp));
      bp = move (pkg);
    }
    else                                                       // Build.
    {
      // At the end we want p1 to point to the object that we keep and p2 to
      // the object that we merge from.
      //
      build_package* p1 (&bp);
      build_package* p2 (&pkg);

      // Pick with the following preference order: user selection over
      // implicit one, source package over a system one, newer version over
      // an older one. So get the preferred into p1 and the other into p2.
      //
      {
        int us (p1->user_selection () - p2->user_selection ());
        int sf (p1->system - p2->system);

        if (us < 0 ||
            (us == 0 && sf > 0) ||
            (us == 0 &&
             sf == 0 &&
             p2->available_version () > p1->available_version ()))
          swap (p1, p2);
      }

      // If the versions differ, pick the satisfactory one and if both are
      // satisfactory, then keep the preferred.
      //
      // If neither of the versions is satisfactory, then ignore those
      // unsatisfied constraints which prevent us from picking the package
      // version which is currently in the map. It feels that the version in
      // the map is no worse than the other one and we choose it
      // specifically for the sake of optimization, trying to avoid throwing
      // the replace_version exception.
      //
      if (p1->available_version () != p2->available_version ())
      {
        // See if pv's version satisfies pc's constraints, skipping those
        // which are meant to be ignored (ics). Return the pointer to the
        // unsatisfied constraint or NULL if all are satisfied.
        //
        vector<const constraint_type*> ics;

        auto test = [&ics] (build_package* pv, build_package* pc)
          -> const constraint_type*
        {
          for (const constraint_type& c: pc->constraints)
          {
            if (find (ics.begin (), ics.end (), &c) == ics.end () &&
                !satisfies (pv->available_version (), c.value))
              return &c;
          }

          return nullptr;
        };

        // Iterate until one of the versions becomes satisfactory due to
        // ignoring some of the constraints.
        //
        for (;;)
        {
          // First see if p1 satisfies p2's constraints.
          //
          if (auto c2 = test (p1, p2))
          {
            // If not, try the other way around.
            //
            if (auto c1 = test (p2, p1))
            {
              // Add a constraint to the ignore-list and the dependent to
              // the unsatisfied-list.
              //
              const constraint_type* c (p1 == &bp ? c2 : c1);
              const build_package* p (dependent_build (*c));

              // Note that if one of the constraints is imposed on the
              // package by the command line, then another constraint must
              // be imposed by a dependent. Also, in this case it feels that
              // the map must contain the dependency constrained by the
              // command line and so p may not be NULL. If that (suddenly)
              // is not the case, then we will have to ignore the constraint
              // imposed by the dependent which is not in the map, replace
              // the version, and call replace_ver().
              //
              if (p == nullptr)
              {
                c = (c == c1 ? c2 : c1);
                p = dependent_build (*c);

                // One of the dependents must be a real package.
                //
                assert (p != nullptr);
              }

              ics.push_back (c);

              package_key d (p->db, p->name ());

              l5 ([&]{trace << "postpone failure for dependent " << d
                            << " unsatisfied with dependency "
                            << bp.available_name_version_db () << " ("
                            << c->value << ')';});

              // Note that in contrast to collect_dependents(), here we also
              // save both unsatisfied constraints and the dependency chain,
              // for the sake of the diagnostics.
              //
              vector<unsatisfied_constraint> ucs {
                unsatisfied_constraint {
                  *c1, p1->available_version (), p1->system},
                unsatisfied_constraint {
                  *c2, p2->available_version (), p2->system}};

              vector<package_key> dc;

              if (dep_chain != nullptr)
              {
                dc.reserve (dep_chain->size ());

                for (const build_package& p: *dep_chain)
                  dc.emplace_back (p.db, p.name ());
              }

              unsatisfied_depts.add (d, pk, c->value, move (ucs), move (dc));
              continue;
            }
            else
              swap (p1, p2);
          }

          break;
        }

        l4 ([&]{trace << "pick " << p1->available_name_version_db ()
                      << " over " << p2->available_name_version_db ();});
      }

      // See if we are replacing the object. If not, then we don't need to
      // collect its prerequisites since that should have already been done.
      // Remember, p1 points to the object we want to keep.
      //
      bool replace (p1 != &bp);

      if (replace)
      {
        swap (*p1, *p2);
        swap (p1, p2); // Setup for merge below.
      }

      p1->merge (move (*p2));

      if (replace)
      {
        if (p1->available_version () != p2->available_version () ||
            p1->system != p2->system)
        {
          // See if in-place replacement is possible (no dependencies, etc)
          // and set scratch to false if that's the case.
          //
          // Firstly, such a package should not participate in any
          // configuration negotiation.
          //
          // Other than that, it looks like the only optimization we can do
          // easily is if the package has no dependencies (and thus cannot
          // impose any constraints). Anything more advanced would require
          // analyzing our dependencies (which we currently cannot easily
          // get) and (1) either dropping the dependency build_package
          // altogether if we are the only dependent (so that it doesn't
          // influence any subsequent dependent) or (2) making sure our
          // constraint is a sub-constraint of any other constraint and
          // removing it from the dependency build_package. Maybe/later.
          //
          // NOTE: remember to update collect_drop() if changing anything
          // here.
          //
          bool scratch (true);

          // While checking if the package has any dependencies skip the
          // toolchain build-time dependencies since they should be quite
          // common.
          //
          // An update: it turned out that just the absence of dependencies
          // is not the only condition that causes a package to be replaced
          // in place. The following conditions must also be met:
          //
          // - The package must not participate in any configuration
          //   negotiation on the dependency side (otherwise we could have
          //   missed collecting its existing dependents).
          //
          // - The package up/downgrade doesn't cause the selection of a
          //   different dependency alternative for any of its dependents
          //   (see postponed_packages for possible outcomes).
          //
          // - The package must not be added to unsatisfied_depts on the
          //   dependency side.
          //
          // This all sounds quite hairy at the moment, so we won't be
          // replacing in place for now (which is an optimization).
#if 0
          if (!has_dependencies (options, p2->available->dependencies))
            scratch = false;
#endif
          l5 ([&]{trace << p2->available_name_version_db ()
                        << " package version needs to be replaced "
                        << (!scratch ? "in-place " : "") << "with "
                        << p1->available_name_version_db ();});

          if (scratch)
            replace_ver (*p1);
        }
        else
        {
          // It doesn't seem possible that replacing the build object
          // without changing the package version may result in changing the
          // package configuration since the configuration always gets into
          // the initial package build entry (potentially pre-entered,
          // etc). If it wouldn't be true then we would also need to add the
          // replacement version entry and re-collect from scratch.
        }
      }
      else
        return nullptr;
    }
  }
  else
  {
    // This is the first time we are adding this package name to the map.
    //
    l4 ([&]{trace << "add " << pkg.available_name_version_db ();});

    i = map_.emplace (move (pk), data_type {end (), move (pkg)}).first;
  }

  build_package& p (i->second.package);

  if (vpb)
    vpb (p, false /* scratch */);

  // Recursively collect build prerequisites, if requested.
  //
  // Note that detecting dependency cycles during the satisfaction phase
  // would be premature since they may not be present in the final package
  // list. Instead we check for them during the ordering phase.
  //
  // The question, of course, is whether we can still end up with an
  // infinite recursion here? Note that for an existing map entry we only
  // recurse after the entry replacement. The infinite recursion would mean
  // that we may replace a package in the map with the same version multiple
  // times:
  //
  // ... p1 -> p2 -> ... p1
  //
  // Every replacement increases the entry version and/or tightens the
  // constraints the next replacement will need to satisfy. It feels
  // impossible that a package version can "return" into the map being
  // replaced once. So let's wait until some real use case proves this
  // reasoning wrong.
  //
  if (recursive)
    collect_build_prerequisites (options,
                                 p,
                                 *dep_chain,
                                 fdb,
                                 apc,
                                 *rpt_depts,
                                 replaced_vers,
                                 postponed_repo,
                                 postponed_alts,
                                 0 /* max_alt_index */,
                                 *postponed_recs,
                                 *postponed_edeps,
                                 *postponed_deps,
                                 postponed_cfgs,
                                 *unacceptable_alts,
                                 unsatisfied_depts);

  return &p;
}
+
+ optional<build_packages::pre_reevaluate_result> build_packages::
+ collect_build_prerequisites (const pkg_build_options& options,
+ build_package& pkg,
+ build_package_refs& dep_chain,
+ const function<find_database_function>& fdb,
+ const function<add_priv_cfg_function>& apc,
+ const repointed_dependents& rpt_depts,
+ replaced_versions& replaced_vers,
+ postponed_packages* postponed_repo,
+ postponed_packages* postponed_alts,
+ size_t max_alt_index,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies& postponed_edeps,
+ postponed_dependencies& postponed_deps,
+ postponed_configurations& postponed_cfgs,
+ unacceptable_alternatives& unacceptable_alts,
+ unsatisfied_dependents& unsatisfied_depts,
+ optional<pair<size_t, size_t>> reeval_pos,
+ const optional<package_key>& orig_dep)
+ {
+ // NOTE: don't forget to update collect_build_postponed() if changing
+ // anything in this function. Also enable and run the tests with the
+ // config.bpkg.tests.all=true variable when done.
+ //
+ tracer trace ("collect_build_prerequisites");
+
+ assert (pkg.action && *pkg.action == build_package::build);
+
+ const package_name& nm (pkg.name ());
+ database& pdb (pkg.db);
+ package_key pk (pdb, nm);
+
+ bool pre_reeval (reeval_pos && reeval_pos->first == 0);
+ assert (!pre_reeval || reeval_pos->second == 0);
+
+ // Must only be specified in the pre-reevaluation mode.
+ //
+ assert (orig_dep.has_value () == pre_reeval);
+
+ bool reeval (reeval_pos && reeval_pos->first != 0);
+ assert (!reeval || reeval_pos->second != 0);
+
+ // The being (pre-)re-evaluated dependent cannot be recursively collected
+ // yet. Also, we don't expect it being configured as system.
+ //
+ // Note that the configured package can still be re-evaluated after
+ // collect_build_prerequisites() has been called but didn't end up with
+ // the recursive collection.
+ //
+ assert ((!pre_reeval && !reeval) ||
+ ((!pkg.recursive_collection ||
+ !pkg.recollect_recursively (rpt_depts)) &&
+ !pkg.skeleton && !pkg.system));
+
+ // If this package is not being (pre-)re-evaluated, is not yet collected
+ // recursively, needs to be reconfigured, and is not yet postponed, then
+ // check if it is a dependency of any dependent with configuration clause
+ // and postpone the collection if that's the case.
+ //
+ // The reason why we don't need to do this for the re-evaluated case is as
+ // follows: this logic is used for an existing dependent that is not
+ // otherwise built (e.g., reconfigured) which means its externally-
+ // imposed configuration (user, dependents) is not being changed.
+ //
+ if (!pre_reeval &&
+ !reeval &&
+ !pkg.recursive_collection &&
+ pkg.reconfigure () &&
+ postponed_cfgs.find_dependency (pk) == nullptr &&
+ postponed_edeps.find (pk) == postponed_edeps.end ())
+ {
+ // Note that there can be multiple existing dependents for a dependency.
+ // Also note that we skip the existing dependents for which re-
+ // evaluation is optional not to initiate any negotiation in a simple
+ // case (see collect_build_prerequisites() description for details).
+ //
+ vector<existing_dependent> eds (
+ query_existing_dependents (trace,
+ options,
+ pk.db,
+ pk.name,
+ true /* exclude_optional */,
+ fdb,
+ rpt_depts,
+ replaced_vers));
+
+ if (!eds.empty ())
+ {
+ bool postpone (false);
+
+ for (existing_dependent& ed: eds)
+ {
+ if (ed.dependency) // Configuration clause is encountered.
+ {
+ const build_package* bp (&pkg);
+
+ package_key& dep (*ed.dependency);
+ package_key dpt (ed.db, ed.selected->name);
+
+ // If the earliest configuration clause applies to a different
+ // dependency, then collect it (non-recursively).
+ //
+ if (dep != pk)
+ bp = collect_existing_dependent_dependency (options,
+ ed,
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ // If the dependency collection has already been postponed, then
+ // indicate that the dependent with configuration clauses is also
+ // present and thus the postponement is not bogus. But only add
+ // the new entry to postponed_deps and throw the
+ // postpone_dependency exception if the dependency is already
+ // collected. Note that adding the new entry unconditionally would
+ // be a bad idea, since by postponing the dependency collection we
+ // may not see its existing dependent with a configuration
+ // clauses, end up with a bogus postponement, and start
+ // yo-yoing. In other words, we add the entry only if absolutely
+ // necessary (who knows, maybe the existing dependent will be
+ // dropped before we try to collect it recursively).
+ //
+ auto i (postponed_deps.find (dep));
+
+ if (i != postponed_deps.end ())
+ i->second.with_config = true;
+
+ // Prematurely collected before we saw any config clauses.
+ //
+ if (bp->recursive_collection)
+ {
+ l5 ([&]{trace << "cannot cfg-postpone dependency "
+ << bp->available_name_version_db ()
+ << " of existing dependent " << dpt
+ << " (collected prematurely), "
+ << "throwing postpone_dependency";});
+
+ if (i == postponed_deps.end ())
+ {
+ postponed_deps.emplace (dep,
+ postponed_dependency {
+ false /* without_config */,
+ true /* with_config */});
+ }
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw postpone_dependency (move (dep));
+ }
+
+ l5 ([&]{trace << "cfg-postpone dependency "
+ << bp->available_name_version_db ()
+ << " of existing dependent " << *ed.selected
+ << ed.db << " due to dependency "
+ << pkg.available_name_version_db ();});
+
+ collect_existing_dependent (options,
+ ed,
+ {pk},
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ // Only add this dependent/dependency to the newly created cluster
+ // if this dependency doesn't belong to any cluster yet, which may
+ // not be the case if there are multiple existing dependents with
+ // configuration clause for this dependency.
+ //
+ // To put it another way, if there are multiple such existing
+ // dependents for this dependency, here we will create the
+ // configuration cluster only for the first one. The remaining
+ // dependents will be added to this dependency's cluster when the
+ // existing dependents of dependencies in this cluster are all
+ // discovered and reevaluated (see collect_build_postponed() for
+ // details).
+ //
+ if (postponed_cfgs.find_dependency (dep) == nullptr)
+ postponed_cfgs.add (move (dpt),
+ ed.dependency_position,
+ move (dep));
+ }
+ else // Existing dependent is deviated.
+ {
+ // Note that we could probably re-collect deviated dependents
+ // recursively right away but such a two-directional recursion
+ // would complicate implementation and troubleshooting. Thus,
+ // given that the deviated dependents are not very common, we just
+ // postpone their re-collection.
+ //
+ l5 ([&]{trace << "schedule re-collection of deviated "
+ << "existing dependent " << *ed.selected
+ << ed.db;});
+
+ recollect_existing_dependent (options,
+ ed,
+ replaced_vers,
+ postponed_recs,
+ postponed_cfgs,
+ unsatisfied_depts,
+ true /* add_required_by */);
+ }
+
+ // Postpone the recursive collection of a dependency if the existing
+ // dependent has deviated or the dependency belongs to the earliest
+ // depends clause with configuration clause or to some later depends
+ // clause. It is supposed that it will be collected during its
+ // existing dependent re-collection.
+ //
+ if (!ed.dependency || // Dependent has deviated.
+ ed.originating_dependency_position >= ed.dependency_position)
+ {
+ postpone = true;
+ postponed_edeps[pk].emplace_back (ed.db, ed.selected->name);
+ }
+ }
+
+ if (postpone)
+ return nullopt;
+ }
+ }
+
+ pkg.recursive_collection = true;
+
+ if (pkg.system)
+ {
+ l5 ([&]{trace << "skip system " << pkg.available_name_version_db ();});
+ return nullopt;
+ }
+
+ const shared_ptr<available_package>& ap (pkg.available);
+ assert (ap != nullptr);
+
+ const shared_ptr<selected_package>& sp (pkg.selected);
+
+ assert ((!pre_reeval && !reeval) || sp != nullptr);
+
+ // True if this is an up/down-grade.
+ //
+ bool ud (sp != nullptr && sp->version != pkg.available_version ());
+
+ // If this is a repointed dependent, then it points to its prerequisite
+ // replacements flag map (see repointed_dependents for details).
+ //
+ const map<package_key, bool>* rpt_prereq_flags (nullptr);
+
+ // Bail out if this is a configured non-system package and no recursive
+ // collection is required.
+ //
+ bool src_conf (sp != nullptr &&
+ sp->state == package_state::configured &&
+ sp->substate != package_substate::system);
+
+ // The being (pre-)re-evaluated dependent must be configured as a source
+ // package and should not be collected recursively (due to upgrade, etc).
+ //
+ assert ((!pre_reeval && !reeval) ||
+ (src_conf && !pkg.recollect_recursively (rpt_depts)));
+
+ if (src_conf)
+ {
+ if (!pre_reeval && !reeval && !pkg.recollect_recursively (rpt_depts))
+ {
+ l5 ([&]{trace << "skip configured "
+ << pkg.available_name_version_db ();});
+ return nullopt;
+ }
+
+ repointed_dependents::const_iterator i (rpt_depts.find (pk));
+
+ if (i != rpt_depts.end ())
+ rpt_prereq_flags = &i->second;
+ }
+
+ // Iterate over dependencies, trying to unambiguously select a
+ // satisfactory dependency alternative for each of them. Fail or postpone
+ // the collection if unable to do so.
+ //
+ const dependencies& deps (ap->dependencies);
+
+ // The skeleton can be pre-initialized before the recursive collection
+ // starts (as a part of dependency configuration negotiation, etc). The
+ // dependencies and alternatives members must both be either present or
+ // not.
+ //
+ assert ((!pkg.dependencies || pkg.skeleton) &&
+ pkg.dependencies.has_value () == pkg.alternatives.has_value ());
+
+ // Note that the selected alternatives list can be filled partially (see
+ // build_package::dependencies for details). In this case we continue
+ // collecting where we stopped previously.
+ //
+ if (!pkg.dependencies)
+ {
+ l5 ([&]{trace << (pre_reeval ? "pre-reeval " :
+ reeval ? "reeval " :
+ "begin " )
+ << pkg.available_name_version_db ();});
+
+ pkg.dependencies = dependencies ();
+ pkg.alternatives = vector<size_t> ();
+
+ if (size_t n = deps.size ())
+ {
+ pkg.dependencies->reserve (n);
+ pkg.alternatives->reserve (n);
+ }
+
+ if (!pkg.skeleton)
+ pkg.init_skeleton (options);
+ }
+ else
+ l5 ([&]{trace << "resume " << pkg.available_name_version_db ();});
+
+ dependencies& sdeps (*pkg.dependencies);
+ vector<size_t>& salts (*pkg.alternatives);
+
+ assert (sdeps.size () == salts.size ()); // Must be parallel.
+
+ // Check if there is nothing to collect anymore.
+ //
+ if (sdeps.size () == deps.size ())
+ {
+ l5 ([&]{trace << "end " << pkg.available_name_version_db ();});
+ return nullopt;
+ }
+
+ // Show how we got here if things go wrong.
+ //
+ // To suppress printing this information clear the dependency chain before
+ // throwing an exception.
+ //
+ auto g (
+ make_exception_guard (
+ [&dep_chain] ()
+ {
+ // Note that we also need to clear the dependency chain, to prevent
+ // the caller's exception guard from printing it.
+ //
+ while (!dep_chain.empty ())
+ {
+ info << "while satisfying "
+ << dep_chain.back ().get ().available_name_version_db ();
+
+ dep_chain.pop_back ();
+ }
+ }));
+
+ if (!pre_reeval)
+ dep_chain.push_back (pkg);
+
+ assert (sdeps.size () < deps.size ());
+
+ package_skeleton& skel (*pkg.skeleton);
+
+ // We shouldn't be failing in the reevaluation mode, given that we only
+ // reevaluate a package if its pre-reevaluation succeeds.
+ //
+ auto fail_reeval = [&pkg] ()
+ {
+ fail << "unable to re-create dependency information of already "
+ << "configured package " << pkg.available_name_version_db () <<
+ info << "likely cause is change in external environment" <<
+ info << "if not, please report in https://github.com/build2/build2/issues/302";
+ };
+
+ bool postponed (false);
+ bool reevaluated (false);
+
+ // In the pre-reevaluation mode keep track of configuration variable
+ // prefixes similar to what we do in pkg_configure_prerequisites(). Stop
+ // tracking if we discovered that the dependent re-evaluation is not
+ // optional.
+ //
+ vector<string> banned_var_prefixes;
+
+ auto references_banned_var = [&banned_var_prefixes] (const string& clause)
+ {
+ for (const string& p: banned_var_prefixes)
+ {
+ if (clause.find (p) != string::npos)
+ return true;
+ }
+
+ return false;
+ };
+
+ if (pre_reeval)
+ {
+ if (!sp->dependency_alternatives_section.loaded ())
+ pdb.load (*sp, sp->dependency_alternatives_section);
+
+ // It doesn't feel like the number of depends clauses may differ for the
+ // available and selected packages in the pre-reevaluation mode since
+ // they must refer to the same package version. If it still happens,
+ // maybe due to some manual tampering, let's assume this as a deviation
+ // case.
+ //
+ size_t nn (deps.size ());
+ size_t on (sp->dependency_alternatives.size ());
+
+ if (nn != on)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: number of depends clauses changed to "
+ << nn << " from " << on;});
+
+ throw reevaluation_deviated ();
+ }
+ }
+
+ pre_reevaluate_result r;
+
+ for (size_t di (sdeps.size ()); di != deps.size (); ++di)
+ {
+ // Fail if we missed the re-evaluation target position for any reason.
+ //
+ if (reeval && di == reeval_pos->first) // Note: reeval_pos is 1-based.
+ fail_reeval ();
+
+ const dependency_alternatives_ex& das (deps[di]);
+
+ // Add an empty alternatives list into the selected dependency list if
+ // this is a toolchain build-time dependency.
+ //
+ dependency_alternatives_ex sdas (das.buildtime, das.comment);
+
+ if (toolchain_buildtime_dependency (options, das, &nm))
+ {
+ if (pre_reeval)
+ {
+ size_t oi (sp->dependency_alternatives[di]);
+
+ // It doesn't feel like it may happen in the pre-reevaluation
+ // mode. If it still happens, maybe due to some manual tampering,
+ // let's assume this as a deviation case.
+ //
+ if (oi != 0)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated at depends clause " << di + 1
+ << ": toolchain buildtime dependency replaced the "
+ << " regular one with selected alternative " << oi;});
+
+ throw reevaluation_deviated ();
+ }
+ }
+
+ sdeps.push_back (move (sdas));
+ salts.push_back (0); // Keep parallel to sdeps.
+ continue;
+ }
+
+ // Evaluate alternative conditions and filter enabled alternatives. Add
+ // an empty alternatives list into the selected dependency list if there
+ // are none.
+ //
+ build_package::dependency_alternatives_refs edas;
+
+ if (pkg.postponed_dependency_alternatives)
+ {
+ edas = move (*pkg.postponed_dependency_alternatives);
+ pkg.postponed_dependency_alternatives = nullopt;
+ }
+ else
+ {
+ for (size_t i (0); i != das.size (); ++i)
+ {
+ const dependency_alternative& da (das[i]);
+
+ bool enabled;
+
+ if (da.enable)
+ {
+ if (pre_reeval &&
+ r.reevaluation_optional &&
+ references_banned_var (*da.enable))
+ {
+ r.reevaluation_optional = false;
+ }
+
+ enabled = skel.evaluate_enable (*da.enable, make_pair (di, i));
+ }
+ else
+ enabled = true;
+
+ if (enabled)
+ edas.push_back (make_pair (ref (da), i));
+ }
+ }
+
+ if (edas.empty ())
+ {
+ if (pre_reeval)
+ {
+ size_t oi (sp->dependency_alternatives[di]);
+
+ if (oi != 0)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated at depends clause " << di + 1
+ << ": dependency with previously selected "
+ << "alternative " << oi << " is now disabled";});
+
+ throw reevaluation_deviated ();
+ }
+ }
+
+ sdeps.push_back (move (sdas));
+ salts.push_back (0); // Keep parallel to sdeps.
+ continue;
+ }
+
+ // Try to pre-collect build information (pre-builds) for the
+ // dependencies of an alternative. Optionally, issue diagnostics into
+ // the specified diag record. In the dry-run mode don't change the
+ // packages collection state (postponed_repo set, etc).
+ //
+ // If an alternative dependency package is specified as a dependency
+ // with a version constraint on the command line, then overwrite the
+ // dependent's constraint with the command line's constraint, if the
+ // latter is a subset of former. If it is not a subset, then bail out
+ // indicating that the alternative dependencies cannot be resolved
+ // (builds is nullopt), unless ignore_unsatisfactory_dep_spec argument
+ // is true. In the latter case continue precollecting as if no
+ // constraint is specified on the command line for this dependency. That
+ // will likely result in the unsatisfied dependent problem, which will
+ // be either resolved or end up with the failure (see
+ // unsatisfied_dependents for details).
+ //
+ // Note that rather than considering an alternative as unsatisfactory
+ // (returning no pre-builds) the function can fail in some cases
+ // (multiple possible configurations for a build-time dependency, orphan
+ // or broken selected package, etc). The assumption here is that the
+ // user would prefer to fix a dependency-related issue first instead of
+ // proceeding with the build which can potentially end up with some less
+ // preferable dependency alternative.
+ //
+ // Pre-collected build information for a single dependency of an
+ // alternative (see the precollect lambda below which produces these).
+ //
+ struct prebuild
+ {
+ bpkg::dependency dependency; // Name and effective version constraint.
+ reference_wrapper<database> db; // Configuration the dependency goes into.
+ shared_ptr<selected_package> selected; // NULL if not currently selected.
+ shared_ptr<available_package> available; // Package version to be built.
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
+ bool system; // Treat as a system package.
+ bool specified_dependency; // Pre-entered (specified on command line).
+ bool force; // Selected version unsatisfactory; forcing up/downgrade.
+
+ // True if the dependency package is either selected in the
+ // configuration or is already being built.
+ //
+ bool reused;
+ };
+ using prebuilds = small_vector<prebuild, 1>;
+
+ // Result of pre-collecting the dependency builds for an alternative
+ // (see the precollect lambda below): either the pre-collected builds,
+ // the unsatisfactory builds, or an indication that the collection
+ // failed or needs to be postponed.
+ //
+ class precollect_result
+ {
+ public:
+ // Nullopt if some dependencies cannot be resolved.
+ //
+ optional<prebuilds> builds;
+
+ // If true is passed as the check_constraints argument to precollect()
+ // and some dependency of the alternative cannot be resolved because
+ // there is no version available which can satisfy all the being built
+ // dependents, then this member contains all the dependency builds
+ // (which otherwise would be contained in the builds member).
+ //
+ optional<prebuilds> unsatisfactory;
+
+ // True if dependencies can all be resolved (builds is present) and
+ // are all reused (see above).
+ //
+ bool reused = false;
+
+ // True if some of the dependencies cannot be resolved (builds is
+ // nullopt) and the dependent package prerequisites collection needs
+ // to be postponed due to inability to find a version satisfying the
+ // pre-entered constraint from repositories available to the dependent
+ // package.
+ //
+ bool repo_postpone = false;
+
+ // Create precollect result containing dependency builds.
+ //
+ precollect_result (prebuilds&& bs, bool r)
+ : builds (move (bs)), reused (r) {}
+
+ // Create precollect result containing unsatisfactory dependency
+ // builds. Note that the reversed argument order (compared to the
+ // above constructor) is what distinguishes the two overloads.
+ //
+ precollect_result (bool r, prebuilds&& bs)
+ : unsatisfactory (move (bs)), reused (r) {}
+
+ // Create precollect result without builds (some dependency can't be
+ // resolved, etc).
+ //
+ explicit
+ precollect_result (bool p): repo_postpone (p) {}
+ };
+
+ auto precollect = [&options,
+ &pkg,
+ &nm,
+ &pdb,
+ ud,
+ &fdb,
+ rpt_prereq_flags,
+ &apc,
+ postponed_repo,
+ &dep_chain,
+ pre_reeval,
+ &trace,
+ this]
+ (const dependency_alternative& da,
+ bool buildtime,
+ const package_prerequisites* prereqs,
+ bool check_constraints,
+ bool ignore_unsatisfactory_dep_spec,
+ diag_record* dr = nullptr,
+ bool dry_run = false) -> precollect_result
+ {
+ // Accumulated dependency builds and whether all of them so far are
+ // reused (selected in the configuration or already being built).
+ //
+ prebuilds r;
+ bool reused (true);
+
+ // The dependent's repository fragment, in which (and in whose
+ // complements/prerequisites) the dependency packages are searched
+ // for (see below).
+ //
+ const lazy_shared_ptr<repository_fragment>& af (
+ pkg.repository_fragment);
+
+ for (const dependency& dp: da)
+ {
+ const package_name& dn (dp.name);
+
+ if (buildtime && pdb.type == build2_config_type)
+ {
+ // It doesn't feel like it may happen in the pre-reevaluation
+ // mode. If it still happens, maybe due to some manual
+ // tampering, let's assume this as a deviation case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: build-time dependency " << dn
+ << " is now in build system module "
+ << "configuration";});
+
+ throw reevaluation_deviated ();
+ }
+
+ assert (dr == nullptr); // Should fail on the "silent" run.
+
+ // Note that the dependent is not necessarily a build system
+ // module.
+ //
+ fail << "build-time dependency " << dn << " in build system "
+ << "module configuration" <<
+ info << "build system modules cannot have build-time "
+ << "dependencies";
+ }
+
+ bool system (false);
+ bool specified (false);
+
+ // If the user specified the desired dependency version
+ // constraint, then we will use it to overwrite the constraint
+ // imposed by the dependent package, checking that it is still
+ // satisfied.
+ //
+ // Note that we can't just rely on the execution plan refinement
+ // that will pick up the proper dependency version at the end of
+ // the day. We may just not get to the plan execution simulation,
+ // failing due to inability for dependency versions collected by
+ // two dependents to satisfy each other constraints (for an
+ // example see the
+ // pkg-build/dependency/apply-constraints/resolve-conflict/
+ // tests).
+
+ // Points to the desired dependency version constraint, if
+ // specified, and is NULL otherwise. Can be used as boolean flag.
+ //
+ const version_constraint* dep_constr (nullptr);
+
+ database* ddb (fdb (pdb, dn, buildtime));
+
+ auto i (ddb != nullptr
+ ? map_.find (*ddb, dn)
+ : map_.find_dependency (pdb, dn, buildtime));
+
+ if (i != map_.end ())
+ {
+ const build_package& bp (i->second.package);
+
+ specified = !bp.action; // Is pre-entered.
+
+ if (specified &&
+ //
+ // The version constraint is specified,
+ //
+ !bp.constraints.empty ())
+ {
+ assert (bp.constraints.size () == 1);
+
+ const build_package::constraint_type& c (bp.constraints[0]);
+
+ // If the user-specified dependency constraint is the wildcard
+ // version, then it satisfies any dependency constraint.
+ //
+ if (!wildcard (c.value) && !satisfies (c.value, dp.constraint))
+ {
+ // We should end up throwing reevaluation_deviated exception
+ // before the diagnostics run in the pre-reevaluation mode.
+ //
+ assert (!pre_reeval || dr == nullptr);
+
+ if (!ignore_unsatisfactory_dep_spec)
+ {
+ if (dr != nullptr)
+ {
+ // " info: ..."
+ string indent (" ");
+
+ *dr << error << "unable to satisfy constraints on package "
+ << dn <<
+ info << nm << pdb << " depends on (" << dn << ' '
+ << *dp.constraint << ')';
+
+ {
+ set<package_key> printed;
+ print_constraints (*dr, pkg, indent, printed);
+ }
+
+ *dr << info << c.dependent << " depends on (" << dn << ' '
+ << c.value << ')';
+
+ if (const build_package* d = dependent_build (c))
+ {
+ set<package_key> printed;
+ print_constraints (*dr, *d, indent, printed);
+ }
+
+ *dr << info << "specify " << dn << " version to satisfy "
+ << nm << " constraint";
+ }
+
+ return precollect_result (false /* postpone */);
+ }
+ }
+ else
+ {
+ dep_constr = &c.value;
+ system = bp.system;
+ }
+ }
+ }
+
+ const dependency& d (!dep_constr
+ ? dp
+ : dependency {dn, *dep_constr});
+
+ // First see if this package is already selected. If we already
+ // have it in the configuration and it satisfies our dependency
+ // version constraint, then we don't want to be forcing its
+ // upgrade (or, worse, downgrade).
+ //
+ // If the prerequisite configuration is explicitly specified by
+ // the user, then search for the prerequisite in this specific
+ // configuration. Otherwise, search recursively in the explicitly
+ // linked configurations of the dependent configuration.
+ //
+ // Note that for the repointed dependent we will always find the
+ // prerequisite replacement rather than the prerequisite being
+ // replaced.
+ //
+ pair<shared_ptr<selected_package>, database*> spd (
+ ddb != nullptr
+ ? make_pair (ddb->find<selected_package> (dn), ddb)
+ : find_dependency (pdb, dn, buildtime));
+
+ if (ddb == nullptr)
+ ddb = &pdb;
+
+ shared_ptr<selected_package>& dsp (spd.first);
+
+ if (prereqs != nullptr &&
+ (dsp == nullptr ||
+ find_if (prereqs->begin (), prereqs->end (),
+ [&dsp] (const auto& v)
+ {
+ return v.first.object_id () == dsp->name;
+ }) == prereqs->end ()))
+ return precollect_result (false /* postpone */);
+
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp;
+
+ shared_ptr<available_package>& dap (rp.first);
+
+ bool force (false);
+
+ if (dsp != nullptr)
+ {
+ // Switch to the selected package configuration.
+ //
+ ddb = spd.second;
+
+ // If we are collecting prerequisites of the repointed
+ // dependent, then only proceed further if this is either a
+ // replacement or unamended prerequisite and we are
+ // up/down-grading (only for the latter).
+ //
+ if (rpt_prereq_flags != nullptr)
+ {
+ auto i (rpt_prereq_flags->find (package_key {*ddb, dn}));
+
+ bool unamended (i == rpt_prereq_flags->end ());
+ bool replacement (!unamended && i->second);
+
+ // We can never end up with the prerequisite being replaced,
+ // since the fdb() function should always return the
+ // replacement instead (see above).
+ //
+ assert (unamended || replacement);
+
+ if (!(replacement || (unamended && ud)))
+ continue;
+ }
+
+ if (dsp->state == package_state::broken)
+ {
+ // If it happens in the pre-reevaluation mode, that may mean
+ // that the package has become broken since the time the
+ // dependent was built. Let's assume this as a deviation case
+ // and fail on the re-collection.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: package " << dn << *ddb
+ << " is broken";});
+
+ throw reevaluation_deviated ();
+ }
+
+ assert (dr == nullptr); // Should fail on the "silent" run.
+
+ fail << "unable to build broken package " << dn << *ddb <<
+ info << "use 'pkg-purge --force' to remove";
+ }
+
+ // If the constraint is imposed by the user we also need to make
+ // sure that the system flags are the same.
+ //
+ if (satisfies (dsp->version, d.constraint) &&
+ (!dep_constr || dsp->system () == system))
+ {
+ system = dsp->system ();
+
+ version_constraint vc (dsp->version);
+
+ // First try to find an available package for this exact
+ // version, falling back to ignoring version revision and
+ // iteration. In particular, this handles the case where a
+ // package moves from one repository to another (e.g., from
+ // testing to stable). For a system package we will try to
+ // find the available package that matches the selected
+ // package version (preferable for the configuration
+ // negotiation machinery) and, if fail, fallback to picking
+ // the latest one (its exact version doesn't really matter in
+ // this case).
+ //
+ // It seems reasonable to search for the package in the
+ // repositories explicitly added by the user if the selected
+ // package was explicitly specified on command line, and in
+ // the repository (and its complements/prerequisites) of the
+ // dependent being currently built otherwise.
+ //
+ if (dsp->hold_package)
+ {
+ linked_databases dbs (dependent_repo_configs (*ddb));
+
+ rp = find_available_one (dbs,
+ dn,
+ vc,
+ true /* prereq */,
+ true /* revision */);
+
+ if (dap == nullptr)
+ rp = find_available_one (dbs, dn, vc);
+
+ if (dap == nullptr && system)
+ rp = find_available_one (dbs, dn, nullopt);
+ }
+ else if (af != nullptr)
+ {
+ rp = find_available_one (dn,
+ vc,
+ af,
+ true /* prereq */,
+ true /* revision */);
+
+ if (dap == nullptr)
+ rp = find_available_one (dn, vc, af);
+
+ if (dap == nullptr && system)
+ rp = find_available_one (dn, nullopt, af);
+ }
+
+ // A stub satisfies any version constraint so we weed them out
+ // (returning stub as an available package feels wrong).
+ //
+ if (dap == nullptr || dap->stub ())
+ rp = make_available_fragment (options, *ddb, dsp);
+ }
+ else
+ // Remember that we may be forcing up/downgrade; we will deal
+ // with it below.
+ //
+ force = true;
+ }
+
+ // If this is a build-time dependency and we build it for the
+ // first time, then we need to find a suitable configuration (of
+ // the host or build2 type) to build it in.
+ //
+ // If the current configuration (ddb) is of the suitable type,
+ // then we use that. Otherwise, we go through its immediate
+ // explicit links. If only one of them has the suitable type, then
+ // we use that. If there are multiple of them, then we fail
+ // advising the user to pick one explicitly. If there are none,
+ // then we create the private configuration and use that. If the
+ // current configuration is private, then search/create in the
+ // parent configuration instead.
+ //
+ // Note that if the user has explicitly specified the
+ // configuration for this dependency on the command line (using
+ // --config-*), then this configuration is used as the starting
+ // point for this search.
+ //
+ if (buildtime &&
+ dsp == nullptr &&
+ ddb->type != buildtime_dependency_type (dn))
+ {
+ database* db (nullptr);
+ database& sdb (ddb->private_ () ? ddb->parent_config () : *ddb);
+
+ const string& type (buildtime_dependency_type (dn));
+
+ // Skip the self-link.
+ //
+ const linked_configs& lcs (sdb.explicit_links ());
+ for (auto i (lcs.begin_linked ()); i != lcs.end (); ++i)
+ {
+ database& ldb (i->db);
+
+ if (ldb.type == type)
+ {
+ if (db == nullptr)
+ db = &ldb;
+ else
+ {
+ // If it happens in the pre-reevaluation mode, that may
+ // mean that some new configuration has been linked since
+ // the time the dependent was built. Let's assume this as
+ // a deviation case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: now multiple possible "
+ << type << " configurations for "
+ << "build-time dependency (" << dp << "): "
+ << db->config_orig << ", "
+ << ldb.config_orig;});
+
+ throw reevaluation_deviated ();
+ }
+
+ assert (dr == nullptr); // Should fail on the "silent" run.
+
+ fail << "multiple possible " << type << " configurations "
+ << "for build-time dependency (" << dp << ')' <<
+ info << db->config_orig <<
+ info << ldb.config_orig <<
+ info << "use --config-* to select the configuration";
+ }
+ }
+ }
+
+ // If no suitable configuration is found, then create and link
+ // it, unless the --no-private-config options is specified. In
+ // the latter case, print the dependency chain to stdout and
+ // exit with the specified code.
+ //
+ if (db == nullptr)
+ {
+ // If it happens in the pre-reevaluation mode, that may mean
+ // that some configuration has been unlinked since the time
+ // the dependent was built. Let's assume this as a deviation
+ // case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: now no suitable configuration "
+ << "is found for build-time dependency ("
+ << dp << ')';});
+
+ throw reevaluation_deviated ();
+ }
+
+ // The private config should be created on the "silent" run
+ // and so there always should be a suitable configuration on
+ // the diagnostics run.
+ //
+ assert (dr == nullptr);
+
+ if (options.no_private_config_specified ())
+ try
+ {
+ // Note that we don't have the dependency package version
+ // yet. We could probably rearrange the code and obtain the
+ // available dependency package by now, given that it comes
+ // from the main database and may not be specified as system
+ // (we would have the configuration otherwise). However,
+ // let's not complicate the code further and instead print
+ // the package name and the constraint, if present.
+ //
+ // Also, in the future, we may still need the configuration
+ // to obtain the available dependency package for some
+ // reason (may want to fetch repositories locally, etc).
+ //
+ cout << d << '\n';
+
+ // Note that we also need to clean the dependency chain, to
+ // prevent the exception guard from printing it to stderr.
+ //
+ for (build_package_refs dc (move (dep_chain));
+ !dc.empty (); )
+ {
+ const build_package& p (dc.back ());
+
+ cout << p.available_name_version () << ' '
+ << p.db.get ().config << '\n';
+
+ dc.pop_back ();
+ }
+
+ throw failed (options.no_private_config ());
+ }
+ catch (const io_error&)
+ {
+ fail << "unable to write to stdout";
+ }
+
+ const strings mods {"cc"};
+
+ const strings vars {
+ "config.config.load=~" + type,
+ "config.config.persist+='config.*'@unused=drop"};
+
+ dir_path cd (bpkg_dir / dir_path (type));
+
+ // Wipe a potentially existing un-linked private configuration
+ // left from a previous faulty run. Note that trying to reuse
+ // it would be a bad idea since it can be half-prepared, with
+ // an outdated database schema version, etc.
+ //
+ cfg_create (options,
+ sdb.config_orig / cd,
+ optional<string> (type) /* name */,
+ type /* type */,
+ mods,
+ vars,
+ false /* existing */,
+ true /* wipe */);
+
+ // Note that we will copy the name from the configuration
+ // unless it clashes with one of the existing links.
+ //
+ shared_ptr<configuration> lc (
+ cfg_link (sdb,
+ sdb.config / cd,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */));
+
+ // Save the newly-created private configuration, together with
+ // the containing configuration database, for their subsequent
+ // re-link.
+ //
+ apc (sdb, move (cd));
+
+ db = &sdb.find_attached (*lc->id);
+ }
+
+ ddb = db; // Switch to the dependency configuration.
+ }
+
+ // Note that building a dependent which is not a build2 module in
+ // the same configuration with the build2 module it depends upon
+ // is an error.
+ //
+ if (buildtime &&
+ !build2_module (nm) &&
+ build2_module (dn) &&
+ pdb == *ddb)
+ {
+ // It doesn't feel like it may happen in the pre-reevaluation
+ // mode. If it still happens, maybe due to some manual
+ // tampering, let's assume this as a deviation case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: now unable to build build system "
+ << "module " << dn << " in its dependent "
+ << "package configuration " << pdb.config_orig;});
+
+ throw reevaluation_deviated ();
+ }
+
+ assert (dr == nullptr); // Should fail on the "silent" run.
+
+ // Note that the dependent package information is printed by the
+ // above exception guard.
+ //
+ fail << "unable to build build system module " << dn
+ << " in its dependent package configuration "
+ << pdb.config_orig <<
+ info << "use --config-* to select suitable configuration";
+ }
+
+ // If we didn't get the available package corresponding to the
+ // selected package, look for any that satisfies the constraint.
+ //
+ if (dap == nullptr)
+ {
+ // And if we have no repository fragment to look in, then that
+ // means the package is an orphan (we delay this check until we
+ // actually need the repository fragment to allow orphans
+ // without prerequisites).
+ //
+ if (af == nullptr)
+ {
+ // If it happens in the pre-reevaluation mode, that may mean
+ // that the dependent has become an orphan since the time it
+ // was built. Let's assume this as a deviation case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: is now orphaned";});
+
+ throw reevaluation_deviated ();
+ }
+
+ assert (dr == nullptr); // Should fail on the "silent" run.
+
+ fail << "package " << pkg.available_name_version_db ()
+ << " is orphaned" <<
+ info << "explicitly upgrade it to a new version";
+ }
+
+ // We look for prerequisites only in the repositories of this
+ // package (and not in all the repositories of this
+ // configuration). At first this might look strange, but it also
+ // kind of makes sense: we only use repositories "approved" for
+ // this package version. Consider this scenario as an example:
+ // hello/1.0.0 and libhello/1.0.0 in stable and libhello/2.0.0
+ // in testing. As a prerequisite of hello, which version should
+ // libhello resolve to? While one can probably argue either way,
+ // resolving it to 1.0.0 is the conservative choice and the user
+ // can always override it by explicitly building libhello.
+ //
+ // Note though, that if this is a test package, then its special
+ // test dependencies (main packages that refer to it) should be
+ // searched upstream through the complement repositories
+ // recursively, since the test packages may only belong to the
+ // main package's repository and its complements.
+ //
+ // @@ Currently we don't implement the reverse direction search
+ // for the test dependencies, effectively only supporting the
+ // common case where the main and test packages belong to the
+ // same repository. Will need to fix this eventually.
+ //
+ // Note that this logic (naturally) does not apply if the
+ // package is already selected by the user (see above).
+ //
+ // Also note that for the user-specified dependency version
+ // constraint we rely on the satisfying package version be
+ // present in repositories of the first dependent met. As a
+ // result, we may fail too early if such package version doesn't
+ // belong to its repositories, but belongs to the ones of some
+ // dependent that we haven't met yet. Can we just search all
+ // repositories for an available package of the appropriate
+ // version and just take it, if present? We could, but then
+ // which repository should we pick? The wrong choice can
+ // introduce some unwanted repositories and package versions
+ // into play. So instead, we will postpone collecting the
+ // problematic dependent, expecting that some other one will
+ // find the appropriate version in its repositories.
+ //
+ // For a system package we will try to find the available
+ // package that matches the constraint (preferable for the
+ // configuration negotiation machinery) and, if fail, fallback
+ // to picking the latest one just to make sure the package is
+ // recognized. An unrecognized package means the broken/stale
+ // repository (see below).
+ //
+ rp = find_existing (dn, d.constraint, af);
+
+ if (dap == nullptr)
+ rp = find_available_one (dn, d.constraint, af);
+
+ if (dap == nullptr && system && d.constraint)
+ rp = find_available_one (dn, nullopt, af);
+
+ if (dap == nullptr)
+ {
+ // If it happens in the pre-reevaluation mode, that may mean
+ // that the repositories has been refetched since the time the
+ // dependent was built and don't contain a satisfactory
+ // package anymore. Let's assume this as a deviation case.
+ //
+ if (pre_reeval)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: unable to satisfy "
+ << (!dep_constr ? "" : "user-specified ")
+ << "dependency constraint (" << d << ')';});
+
+ throw reevaluation_deviated ();
+ }
+
+ if (dep_constr && !system && postponed_repo != nullptr)
+ {
+ // We shouldn't be called in the diag mode for the postponed
+ // package builds.
+ //
+ assert (dr == nullptr);
+
+ if (!dry_run)
+ {
+ l5 ([&]{trace << "rep-postpone dependent "
+ << pkg.available_name_version_db ()
+ << " due to dependency " << dp
+ << " and user-specified constraint "
+ << *dep_constr;});
+
+ postponed_repo->insert (&pkg);
+ }
+
+ return precollect_result (true /* postpone */);
+ }
+
+ // Fail if we are unable to find an available dependency
+ // package which satisfies the dependent's constraint.
+ //
+ // It feels that just considering this alternative as
+ // unsatisfactory and silently trying another alternative
+ // would be wrong, since the user may rather want to
+ // fix/re-fetch the repository and retry.
+ //
+ // Note: this local diag_record shadows the dr parameter and
+ // fails unconditionally when it goes out of scope.
+ //
+ diag_record dr (fail);
+
+ // Issue diagnostics differently based on the presence of
+ // available packages for the unsatisfied dependency.
+ //
+ // Note that there can't be any stubs, since they satisfy
+ // any constraint and we won't be here if there were any.
+ //
+ vector<shared_ptr<available_package>> aps (
+ find_available (dn, nullopt /* version_constraint */, af));
+
+ if (!aps.empty ())
+ {
+ dr << "unable to satisfy dependency constraint (" << dn;
+
+ // We need to be careful not to print the wildcard-based
+ // constraint.
+ //
+ if (d.constraint &&
+ (!dep_constr || !wildcard (*dep_constr)))
+ dr << ' ' << *d.constraint;
+
+ dr << ") of package " << nm << pdb <<
+ info << "available " << dn << " versions:";
+
+ for (const shared_ptr<available_package>& ap: aps)
+ dr << ' ' << ap->version;
+ }
+ else
+ {
+ dr << "no package available for dependency " << dn
+ << " of package " << nm << pdb;
+ }
+
+ // Avoid printing this if the dependent package is external
+ // since it's more often confusing than helpful (they are
+ // normally not fetched manually).
+ //
+ if (!af->location.empty () &&
+ !af->location.directory_based () &&
+ (!dep_constr || system))
+ dr << info << "repository " << af->location << " appears "
+ << "to be broken" <<
+ info << "or the repository metadata could be stale" <<
+ info << "run 'bpkg rep-fetch' (or equivalent) to update";
+ }
+
+ // If all that's available is a stub then we need to make sure
+ // the package is present in the system repository and it's
+ // version satisfies the constraint. If a source package is
+ // available but there is a system package specified on the
+ // command line and it's version satisfies the constraint then
+ // the system package should be preferred. To recognize such a
+ // case we just need to check if the authoritative system
+ // version is set and it satisfies the constraint. If the
+ // corresponding system package is non-optional it will be
+ // preferred anyway.
+ //
+ if (dap->stub ())
+ {
+ // Note that the constraint can safely be printed as it can't
+ // be a wildcard (produced from the user-specified dependency
+ // version constraint). If it were, then the system version
+ // wouldn't be NULL and would satisfy itself.
+ //
+ if (dap->system_version (*ddb) == nullptr)
+ {
+ // We should end up throwing reevaluation_deviated exception
+ // before the diagnostics run in the pre-reevaluation mode.
+ //
+ assert (!pre_reeval || dr == nullptr);
+
+ if (dr != nullptr)
+ *dr << error << "dependency " << d << " of package "
+ << nm << " is not available in source" <<
+ info << "specify ?sys:" << dn << " if it is available "
+ << "from the system";
+
+ return precollect_result (false /* postpone */);
+ }
+
+ if (!satisfies (*dap->system_version (*ddb), d.constraint))
+ {
+ // We should end up throwing reevaluation_deviated exception
+ // before the diagnostics run in the pre-reevaluation mode.
+ //
+ assert (!pre_reeval || dr == nullptr);
+
+ // NOTE(review): "constrains" in the diagnostics below
+ // appears to be a typo for "constraints".
+ //
+ if (dr != nullptr)
+ *dr << error << "dependency " << d << " of package "
+ << nm << " is not available in source" <<
+ info << package_string (dn,
+ *dap->system_version (*ddb),
+ true /* system */)
+ << " does not satisfy the constrains";
+
+ return precollect_result (false /* postpone */);
+ }
+
+ system = true;
+ }
+ else
+ {
+ auto p (dap->system_version_authoritative (*ddb));
+
+ if (p.first != nullptr &&
+ p.second && // Authoritative.
+ satisfies (*p.first, d.constraint))
+ system = true;
+ }
+ }
+
+ // The dependency is reused if it is already in the package map
+ // (being built or pre-entered) or is selected in the
+ // configuration.
+ //
+ bool ru (i != map_.end () || dsp != nullptr);
+
+ if (!ru)
+ reused = false;
+
+ r.push_back (prebuild {d,
+ *ddb,
+ move (dsp),
+ move (dap),
+ move (rp.second),
+ system,
+ specified,
+ force,
+ ru});
+ }
+
+ // Now, as we have pre-collected the dependency builds, if
+ // requested, go through them and check that for those dependencies
+ // which are already being built we will be able to choose one of
+ // them (either existing or new) which satisfies all the dependents.
+ // If that's not the case, then issue the diagnostics, if requested,
+ // and return the unsatisfactory dependency builds.
+ //
+ // Note that collect_build() also performs this check but postponing
+ // it till then can end up in failing instead of selecting some
+ // other dependency alternative.
+ //
+ if (check_constraints)
+ {
+ for (const prebuild& b: r)
+ {
+ const shared_ptr<available_package>& dap (b.available);
+
+ // Otherwise we would have failed earlier.
+ //
+ assert (dap != nullptr);
+
+ const dependency& d (b.dependency);
+
+ auto i (map_.find (b.db, d.name));
+
+ if (i != map_.end () && d.constraint)
+ {
+ const build_package& bp (i->second.package);
+
+ if (bp.action && *bp.action == build_package::build)
+ {
+ const version& v1 (b.system
+ ? *dap->system_version (b.db)
+ : dap->version);
+
+ const version& v2 (bp.available_version ());
+
+ if (v1 != v2)
+ {
+ using constraint_type = build_package::constraint_type;
+
+ constraint_type c1 (*d.constraint,
+ pdb,
+ nm,
+ pkg.available_version (),
+ false /* selected_dependent */);
+
+ if (!satisfies (v2, c1.value))
+ {
+ for (const constraint_type& c2: bp.constraints)
+ {
+ if (!satisfies (v1, c2.value))
+ {
+ // We should end up throwing reevaluation_deviated
+ // exception before the diagnostics run in the
+ // pre-reevaluation mode.
+ //
+ assert (!pre_reeval || dr == nullptr);
+
+ if (dr != nullptr)
+ {
+ const package_name& n (d.name);
+
+ // " info: ..."
+ string indent (" ");
+
+ *dr << error << "unable to satisfy constraints on "
+ << "package " << n <<
+ info << c2.dependent << " depends on (" << n
+ << ' ' << c2.value << ')';
+
+ if (const build_package* d = dependent_build (c2))
+ {
+ set<package_key> printed;
+ print_constraints (*dr, *d, indent, printed);
+ }
+
+ *dr << info << c1.dependent << " depends on ("
+ << n << ' ' << c1.value << ')';
+
+ if (const build_package* d = dependent_build (c1))
+ {
+ set<package_key> printed;
+ print_constraints (*dr, *d, indent, printed);
+ }
+
+ *dr << info << "available "
+ << bp.available_name_version () <<
+ info << "available "
+ << package_string (n, v1, b.system) <<
+ info << "explicitly specify " << n
+ << " version to manually satisfy both "
+ << "constraints";
+ }
+
+ return precollect_result (reused, move (r));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return precollect_result (move (r), reused);
+ };
+
+ // Try to collect the previously collected pre-builds.
+ //
+ // Return false if the dependent has configuration clauses and is
+ // postponed until dependencies configuration negotiation.
+ //
+ auto collect = [&options,
+ &pkg,
+ &pdb,
+ &nm,
+ &pk,
+ &fdb,
+ &rpt_depts,
+ &apc,
+ &replaced_vers,
+ &dep_chain,
+ postponed_repo,
+ postponed_alts,
+ &postponed_recs,
+ &postponed_edeps,
+ &postponed_deps,
+ &postponed_cfgs,
+ &unacceptable_alts,
+ &unsatisfied_depts,
+ &di,
+ reeval,
+ &reeval_pos,
+ &reevaluated,
+ &fail_reeval,
+ &edas,
+ &das,
+ &precollect,
+ &trace,
+ this]
+ (const dependency_alternative& da,
+ size_t dai,
+ prebuilds&& bs,
+ const package_prerequisites* prereqs,
+ bool check_constraints,
+ bool ignore_unsatisfactory_dep_spec)
+ {
+ // Dependency alternative position.
+ //
+ pair<size_t, size_t> dp (di + 1, dai + 1);
+
+ if (reeval &&
+ dp.first == reeval_pos->first &&
+ dp.second != reeval_pos->second)
+ fail_reeval ();
+
+ postponed_configuration::packages cfg_deps;
+
+ // Remove the temporary dependency collection postponements (see
+ // below for details).
+ //
+ postponed_configuration::packages temp_postponements;
+
+ auto g (
+ make_guard (
+ [&temp_postponements, &postponed_deps] ()
+ {
+ for (const package_key& d: temp_postponements)
+ postponed_deps.erase (d);
+ }));
+
+ package_version_key pvk (pk.db, pk.name, pkg.available_version ());
+
+ for (prebuild& b: bs)
+ {
+ build_package bpk {
+ build_package::build,
+ b.db,
+ b.selected,
+ b.available,
+ move (b.repository_fragment),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ b.system,
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ {pvk}, // Required by (dependent).
+ true, // Required by dependents.
+ 0}; // State flags.
+
+ const optional<version_constraint>& constraint (
+ b.dependency.constraint);
+
+ // Add our constraint, if we have one.
+ //
+ // Note that we always add the constraint implied by the
+ // dependent. The user-implied constraint, if present, will be
+ // added when merging from the pre-entered entry. So we will have
+ // both constraints for completeness.
+ //
+ if (constraint)
+ bpk.constraints.emplace_back (*constraint,
+ pdb,
+ nm,
+ pkg.available_version (),
+ false /* selected_dependent */);
+
+ // Now collect this prerequisite. If it was actually collected
+ // (i.e., it wasn't already there) and we are forcing a downgrade
+ // or upgrade, then refuse for a held version, warn for a held
+ // package, and print the info message otherwise, unless the
+ // verbosity level is less than two.
+ //
+ // Note though that while the prerequisite was collected it could
+              // have happened because it is an optional package and so not being
+ // pre-collected earlier. Meanwhile the package was specified
+ // explicitly and we shouldn't consider that as a
+ // dependency-driven up/down-grade enforcement.
+ //
+ // Here is an example of the situation we need to handle properly:
+ //
+ // repo: foo/2(->bar/2), bar/0+1
+ // build sys:bar/1
+ // build foo ?sys:bar/2
+ //
+ // Pass the function which verifies we don't try to force
+ // up/downgrade of the held version and makes sure we don't print
+ // the dependency chain if replace_version will be thrown.
+ //
+ // Also note that we rely on "small function object" optimization
+ // here.
+ //
+ struct
+ {
+ const build_package& dependent;
+ const prebuild& prerequisite;
+ } dpn {pkg, b};
+
+ const function<verify_package_build_function> verify (
+ [&dpn, &dep_chain] (const build_package& p, bool scratch)
+ {
+ const prebuild& prq (dpn.prerequisite);
+ const build_package& dep (dpn.dependent);
+
+ if (prq.force && !prq.specified_dependency)
+ {
+ // Fail if the version is held. Otherwise, warn if the
+ // package is held.
+ //
+ bool f (prq.selected->hold_version);
+ bool w (!f && prq.selected->hold_package);
+
+ // Note that there is no sense to warn or inform the user if
+ // we are about to start re-collection from scratch.
+ //
+ // @@ It seems that we may still warn/inform multiple times
+ // about the same package if we start from scratch. The
+ // intermediate diagnostics can probably be irrelevant to
+ // the final result.
+ //
+ // Perhaps what we should do is queue the diagnostics and
+                  //    then, if the run is not scratched, issue it. And if
+ // it is scratched, then drop it.
+ //
+ if (f || ((w || verb >= 2) && !scratch))
+ {
+ const version& av (p.available_version ());
+
+ bool u (av > prq.selected->version);
+ bool c (prq.dependency.constraint);
+
+ diag_record dr;
+
+ (f ? dr << fail :
+ w ? dr << warn :
+ dr << info)
+ << "package " << dep.name () << dep.db
+ << " dependency on " << (c ? "(" : "") << prq.dependency
+ << (c ? ")" : "") << " is forcing "
+ << (u ? "up" : "down") << "grade of " << *prq.selected
+ << prq.db << " to ";
+
+ // Print both (old and new) package names in full if the
+ // system attribution changes.
+ //
+ if (prq.selected->system ())
+ dr << p.available_name_version ();
+ else
+ dr << av; // Can't be a system version so is never wildcard.
+
+ if (prq.selected->hold_version)
+ dr << info << "package version " << *prq.selected
+ << prq.db<< " is held";
+
+ if (f)
+ dr << info << "explicitly request version "
+ << (u ? "up" : "down") << "grade to continue";
+ }
+ }
+
+ // Don't print the "while satisfying..." chain if we are about
+ // to re-collect the packages.
+ //
+ if (scratch)
+ dep_chain.clear ();
+ });
+
+ // Note: non-recursive.
+ //
+ build_package* p (
+ collect_build (options,
+ move (bpk),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts,
+ &dep_chain,
+ nullptr /* fdb */,
+ nullptr /* apc */,
+ nullptr /* rpt_depts */,
+ nullptr /* postponed_repo */,
+ nullptr /* postponed_alts */,
+ nullptr /* postponed_recs */,
+ nullptr /* postponed_edeps */,
+ nullptr /* postponed_deps */,
+ nullptr /* unacceptable_alts */,
+ verify));
+
+ package_key dpk (b.db, b.available->id.name);
+
+ // Do not collect prerequisites recursively for dependent
+ // re-evaluation. Instead, if the re-evaluation position is
+ // reached, stash the dependency packages to add them to the
+ // existing dependent's cluster.
+ //
+ if (reeval && dp != *reeval_pos)
+ continue;
+
+ // Note that while collect_build() may prefer an existing entry in
+ // the map and return NULL, the recursive collection of this
+            // preferred entry may have been postponed due to the existing
+ // dependent (see collect_build_prerequisites() for details). Now,
+ // we can potentially be recursively collecting such a dependent
+ // after its re-evaluation to some earlier than this dependency
+ // position. If that's the case, it is the time to collect this
+ // dependency (unless it has a config clause which will be handled
+ // below).
+ //
+ if (p == nullptr)
+ {
+ p = entered_build (dpk);
+
+ // We don't expect the collected build to be replaced with the
+ // drop since its required-by package names have the "required
+ // by dependents" semantics.
+ //
+ assert (p != nullptr &&
+ p->action &&
+ *p->action == build_package::build);
+ }
+
+ bool collect_prereqs (!p->recursive_collection);
+
+ // Do not recursively collect a dependency of a dependent with
+ // configuration clauses, which could be this or some other
+ // (indicated by the presence in postponed_deps or postponed_cfgs)
+ // dependent. In the former case if the prerequisites were
+ // prematurely collected, throw postpone_dependency.
+ //
+ // Note that such a dependency will be recursively collected
+ // directly right after the configuration negotiation (rather than
+ // via the dependent).
+ //
+ {
+ if (da.prefer || da.require)
+ {
+ // If the dependency collection has already been postponed,
+ // then indicate that the dependent with configuration clauses
+ // is also present and thus the postponement is not bogus.
+ // Otherwise, if the dependency is already recursively
+ // collected (and we are not up-negotiating; see below) then
+ // add the new entry to postponed_deps and throw the
+ // postpone_dependency exception. Otherwise, temporarily add
+ // the new entry for the duration of the dependency collection
+ // loop to prevent recursive collection of this dependency via
+ // some other dependent. When out of the loop, we will add the
+ // dependency into some configuration cluster, effectively
+ // moving the dependency postponement information from
+ // postponed_deps to postponed_cfgs. Note that generally we
+ // add new entries to postponed_deps only if absolutely
+ // necessary to avoid yo-yoing (see the initial part of the
+ // collect_build_prerequisites() function for details).
+ //
+ auto i (postponed_deps.find (dpk));
+
+ bool new_postponement (false);
+
+ if (i == postponed_deps.end ())
+ {
+ postponed_deps.emplace (dpk,
+ postponed_dependency {
+ false /* without_config */,
+ true /* with_config */});
+ new_postponement = true;
+ }
+ else
+ i->second.with_config = true;
+
+ // Throw postpone_dependency if the dependency is prematurely
+ // collected before we saw any config clauses.
+ //
+ // Note that we don't throw if the dependency already belongs
+ // to some (being) negotiated cluster since we will
+ // up-negotiate its configuration (at the end of the loop)
+ // instead.
+ //
+ if (p->recursive_collection)
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (dpk));
+
+ // Can it happen that an already recursively collected
+ // dependency (recursive_collection is true) belongs to a
+ // non (being) negotiated cluster? Yes, if, in particular,
+ // this dependency is an already re-evaluated existing
+ // dependent and we are currently re-evaluating its own
+ // existing dependent and its (as a dependency) cluster is
+ // not being negotiated yet (is in the existing dependents
+ // re-evaluation phase). See the
+ // pkg-build/.../collected-dependency-non-negotiated-cluster
+ // test for an example.
+ //
+ if (!(pcfg != nullptr && pcfg->negotiated))
+ {
+ if (reeval)
+ {
+ l5 ([&]{trace << "cannot re-evaluate existing dependent "
+ << pkg.available_name_version_db ()
+ << " due to dependency "
+ << p->available_name_version_db ()
+ << " (collected prematurely), "
+ << "throwing postpone_dependency";});
+ }
+ else
+ {
+ l5 ([&]{trace << "cannot cfg-postpone dependency "
+ << p->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ()
+ << " (collected prematurely), "
+ << "throwing postpone_dependency";});
+ }
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw postpone_dependency (move (dpk));
+ }
+ }
+
+ if (new_postponement)
+ temp_postponements.push_back (dpk);
+
+ if (!reeval)
+ {
+ // Postpone until (re-)negotiation.
+ //
+ l5 ([&]{trace << "cfg-postpone dependency "
+ << p->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ();});
+ }
+
+ cfg_deps.push_back (move (dpk));
+
+ collect_prereqs = false;
+ }
+ else
+ {
+ // Indicate that the dependent without configuration clauses
+ // is also present.
+ //
+ auto i (postponed_deps.find (dpk));
+ if (i != postponed_deps.end ())
+ {
+ l5 ([&]{trace << "dep-postpone dependency "
+ << p->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ();});
+
+ i->second.wout_config = true;
+
+ collect_prereqs = false;
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (dpk));
+
+ if (pcfg != nullptr)
+ {
+ l5 ([&]{trace << "dep-postpone dependency "
+ << p->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ()
+ << " since already in cluster " << *pcfg;});
+
+ collect_prereqs = false;
+ }
+ else
+ {
+ l5 ([&]{trace << "no cfg-clause for dependency "
+ << p->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ();});
+ }
+ }
+ }
+ }
+
+ if (collect_prereqs)
+ collect_build_prerequisites (options,
+ *p,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+
+ // If this dependent has any dependencies with configurations
+ // clauses, then we need to deal with that.
+ //
+ // This is what we refer to as the "up-negotiation" where we
+ // negotiate the configuration of dependents that could not be
+ // postponed and handled all at once during "initial negotiation" in
+ // collect_build_postponed().
+ //
+ if (!cfg_deps.empty ())
+ {
+ // First, determine if there is any unprocessed reused dependency
+ // alternative that we can potentially use instead of the current
+ // one if it turns out that a configuration for some of its
+ // dependencies cannot be negotiated between all the dependents
+ // (see unacceptable_alternatives for details).
+ //
+ bool has_alt (false);
+ {
+ // Find the index of the current dependency alternative.
+ //
+ size_t i (0);
+ for (; i != edas.size (); ++i)
+ {
+ if (&edas[i].first.get () == &da)
+ break;
+ }
+
+ // The current dependency alternative must be present in the
+ // list.
+ //
+ assert (i != edas.size ());
+
+ // Return true if the current alternative is unacceptable.
+ //
+ auto unacceptable =
+ [&pk, &pkg, di, &i, &edas, &unacceptable_alts] ()
+ {
+ // Convert to 1-base.
+ //
+ pair<size_t, size_t> pos (di + 1, edas[i].second + 1);
+
+ return unacceptable_alts.find (
+ unacceptable_alternative (pk,
+ pkg.available->version,
+ pos)) !=
+ unacceptable_alts.end ();
+ };
+
+ // See if there is any unprocessed reused alternative to the
+ // right.
+ //
+ // Note that this is parallel to the alternative selection
+ // logic.
+ //
+ bool unsatisfactory (false);
+
+ for (++i; i != edas.size (); ++i)
+ {
+ if (unacceptable ())
+ continue;
+
+ const dependency_alternative& a (edas[i].first);
+
+ precollect_result r (
+ precollect (a,
+ das.buildtime,
+ prereqs,
+ check_constraints,
+ ignore_unsatisfactory_dep_spec,
+ nullptr /* diag_record */,
+ true /* dry_run */));
+
+ if (r.builds && r.reused)
+ {
+ has_alt = true;
+ break;
+ }
+
+ if (r.unsatisfactory)
+ unsatisfactory = true;
+ }
+
+ // If there are none and we are in the "recreate dependency
+ // decisions" mode, then repeat the search in the "make
+ // dependency decisions" mode.
+ //
+ if (!has_alt && prereqs != nullptr)
+ {
+ unsatisfactory = false;
+
+ for (i = 0; i != edas.size (); ++i)
+ {
+ if (unacceptable ())
+ continue;
+
+ const dependency_alternative& a (edas[i].first);
+
+ if (&a != &da) // Skip the current dependency alternative.
+ {
+ precollect_result r (
+ precollect (a,
+ das.buildtime,
+ nullptr /* prereqs */,
+ check_constraints,
+ ignore_unsatisfactory_dep_spec,
+ nullptr /* diag_record */,
+ true /* dry_run */));
+
+ if (r.builds && r.reused)
+ {
+ has_alt = true;
+ break;
+ }
+
+ if (r.unsatisfactory)
+ unsatisfactory = true;
+ }
+ }
+ }
+
+ // If there are none and we are in the "check constraints" mode,
+ // then repeat the search with this mode off.
+ //
+ bool cc (check_constraints);
+ if (!has_alt && check_constraints && unsatisfactory)
+ {
+ cc = false;
+
+ for (i = 0; i != edas.size (); ++i)
+ {
+ if (unacceptable ())
+ continue;
+
+ const dependency_alternative& a (edas[i].first);
+
+ if (&a != &da) // Skip the current dependency alternative.
+ {
+ precollect_result r (
+ precollect (a,
+ das.buildtime,
+ nullptr /* prereqs */,
+ false /* check_constraints */,
+ ignore_unsatisfactory_dep_spec,
+ nullptr /* diag_record */,
+ true /* dry_run */));
+
+ if (r.builds && r.reused)
+ {
+ has_alt = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!has_alt && !ignore_unsatisfactory_dep_spec)
+ {
+ for (i = 0; i != edas.size (); ++i)
+ {
+ if (unacceptable ())
+ continue;
+
+ const dependency_alternative& a (edas[i].first);
+
+ if (&a != &da) // Skip the current dependency alternative.
+ {
+ precollect_result r (
+ precollect (a,
+ das.buildtime,
+ nullptr /* prereqs */,
+ cc,
+ true /* ignore_unsatisfactory_dep_spec */,
+ nullptr /* diag_record */,
+ true /* dry_run */));
+
+ if (r.builds && r.reused)
+ {
+ has_alt = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Re-evaluation is a special case (it happens during cluster
+ // negotiation; see collect_build_postponed()).
+ //
+ if (reeval)
+ {
+ reevaluated = true;
+
+ // As a first step add this dependent/dependencies to one of the
+ // new/existing postponed_configuration clusters, which could
+ // potentially cause some of them to be merged. Note that when
+ // we re-evaluate existing dependents of dependencies in a
+ // cluster, these dependents can potentially be added to
+ // different clusters (see collect_build_postponed() for
+ // details). Here are the possibilities and what we should do in
+ // each case.
+ //
+ // 1. Got added to a new cluster -- this dependent got postponed
+ // and we return false.
+ //
+ // 2. Got added to an existing non-yet-negotiated cluster (which
+ // could potentially involve merging a bunch of them) --
+ // ditto. Note this also covers adding into a cluster which
+ // contain dependencies whose existing dependents we are
+ // currently re-evaluating (the negotiated member is absent
+ // but the depth is non-zero).
+ //
+ // 3. Got added to an existing already-negotiated cluster (which
+ // could potentially involve merging a bunch of them, some
+ // negotiated and some not yet negotiated). Perhaps just
+ // making the resulting cluster shadow and rolling back, just
+ // like in the other case (non-existing dependent), will do.
+ //
+ postponed_configuration& cfg (
+ postponed_cfgs.add (pk,
+ true /* existing */,
+ dp,
+ cfg_deps,
+ has_alt).first);
+
+ if (cfg.negotiated) // Case (3).
+ {
+ // Note that the closest cluster up on the stack is in the
+ // existing dependents re-evaluation phase and thus is not
+ // being negotiated yet. The following clusters up on the
+ // stack can only be in the (fully) negotiated state. Thus, if
+ // cfg.negotiated member is present it can only be true.
+ //
+ // Also as a side-note: at any given moment there can only be
+              // 0 or 1 cluster being negotiated (the negotiated member is
+ // false).
+ //
+ assert (*cfg.negotiated);
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ // There is just one complication:
+ //
+ // If the shadow cluster is already present and it is exactly
+ // the same as the resulting cluster which we are going to
+ // make a shadow, then we have already been here and we may
+ // start yo-yoing. To prevent that we will throw the
+ // merge_configuration_cycle exception instead of
+ // merge_configuration, so that the caller could handle this
+ // situation, for example, by just re-collecting the being
+ // re-evaluated existing dependent from scratch, reducing this
+ // case to the regular up-negotiating.
+ //
+ if (!cfg.is_shadow_cluster (cfg))
+ {
+ l5 ([&]{trace << "re-evaluating dependent "
+ << pkg.available_name_version_db ()
+ << " involves negotiated configurations and "
+ << "results in " << cfg << ", throwing "
+ << "merge_configuration";});
+
+ throw merge_configuration {cfg.depth};
+ }
+ else
+ {
+ l5 ([&]{trace << "merge configuration cycle detected for "
+ << "being re-evaluated dependent "
+ << pkg.available_name_version_db ()
+ << " since " << cfg << " is a shadow of itself"
+ << ", throwing merge_configuration_cycle";});
+
+ throw merge_configuration_cycle {cfg.depth};
+ }
+ }
+
+ l5 ([&]{trace << "re-evaluating dependent "
+ << pkg.available_name_version_db ()
+ << " results in " << cfg;});
+
+ return false;
+ }
+
+ // As a first step add this dependent/dependencies to one of the
+ // new/existing postponed_configuration clusters, which could
+ // potentially cause some of them to be merged. Here are the
+ // possibilities and what we should do in each case.
+ //
+ // 1. Got added to a new cluster -- this dependent got postponed
+ // and we return false.
+ //
+ // 2. Got added to an existing non-yet-negotiated cluster (which
+ // could potentially involve merging a bunch of them) -- ditto.
+ //
+ // 3. Got added to an existing already-[being]-negotiated cluster
+ // (which could potentially involve merging a bunch of them,
+ // some negotiated, some being negotiated, and some not yet
+ // negotiated) -- see below logic.
+ //
+ // Note that if a dependent is postponed, it will be recursively
+ // recollected right after the configuration negotiation.
+
+ // Note: don't move the argument from since may be needed for
+ // constructing exception.
+ //
+ pair<postponed_configuration&, optional<bool>> r (
+ postponed_cfgs.add (pk,
+ false /* existing */,
+ dp,
+ cfg_deps,
+ has_alt));
+
+ postponed_configuration& cfg (r.first);
+
+ if (cfg.depth == 0)
+ return false; // Cases (1) or (2).
+ else
+ {
+ // Case (3).
+ //
+ // There is just one complication:
+ //
+ // If all the merged clusters are already negotiated, then all
+ // is good: all the dependencies in cfg_deps have been collected
+ // recursively as part of the configuration negotiation (because
+ // everything in this cluster is already negotiated) and we can
+ // return true (no need to postpone any further steps).
+ //
+ // But if we merged clusters not yet negotiated, or, worse,
+ // being in the middle of negotiation, then we need to get this
+ // merged cluster into the fully negotiated state. The way we do
+ // it is by throwing merge_configuration (see below).
+ //
+ // When we are back here after throwing merge_configuration,
+ // then all the clusters have been pre-merged and our call to
+ // add() shouldn't have added any new cluster. In this case the
+ // cluster can either be already negotiated or being negotiated
+ // and we can proceed as in the "everything is negotiated case"
+              // above (we just need to get the dependencies that we care
+ // about into the recursively collected state).
+ //
+
+ // To recap, r.second values mean:
+ //
+ // absent -- shadow cluster-based merge is/being negotiated
+ // false -- some non or being negotiated clusters are merged
+ // true -- no clusters are merged or all merged have been
+ // negotiated
+ //
+ if (r.second && !*r.second)
+ {
+ // The partially negotiated case.
+ //
+ // Handling this in a straightforward way is not easy due to
+ // the being negotiated cases -- we have code up the stack
+ // that is in the middle of the negotiation logic.
+ //
+ // Another idea is to again throw to the outer try/catch frame
+ // (thus unwinding all the being negotiated code) and complete
+ // the work there. The problem with this approach is that
+ // without restoring the state we may end up with unrelated
+ // clusters that will have no corresponding try-catch frames
+ // (because we may unwind them in the process).
+ //
+ // So the approach we will use is the "shadow" idea for
+ // merging clusters. Specifically, we throw
+ // merge_configuration to the outer try/catch. At the catch
+ // site we make the newly merged cluster a shadow of the
+ // restored cluster and retry the same steps similar to
+ // retry_configuration. As we redo these steps, we consult the
+ // shadow cluster and if the dependent/dependency entry is
+ // there, then instead of adding it to another (new/existing)
+ // cluster that would later be merged into this non-shadow
+ // cluster, we add it directly to the non-shadow cluster
+ // (potentially merging other cluster which it feels like by
+ // definition should all be already fully negotiated). The end
+ // result is that once we reach this point again, there will
+ // be nothing to merge.
+ //
+ // The shadow check is part of postponed_configs::add().
+ //
+ l5 ([&]{trace << "cfg-postponing dependent "
+ << pkg.available_name_version_db ()
+ << " merges non-negotiated and/or being "
+ << "negotiated configurations in and results in "
+ << cfg << ", throwing merge_configuration";});
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw merge_configuration {cfg.depth};
+ }
+
+ // Note that there can be some non-negotiated clusters which
+ // have been merged based on the shadow cluster into the
+ // resulting (being) negotiated cluster. If we had negotiated
+ // such non-negotiated clusters normally, we would query
+ // existing dependents for the dependencies they contain and
+ // consider them in the negotiation process by re-evaluating
+ // them (see collect_build_postponed() for details). But if we
+ // force-merge a non-negotiated cluster into the (being)
+ // negotiated cluster then the existing dependents of its
+ // dependencies won't participate in the negotiation, unless we
+ // take care of that now. We will recognize such dependencies as
+ // not yet (being) recursively collected and re-collect their
+ // existing dependents, if any.
+ //
+ vector<existing_dependent> depts;
+ string deps_trace;
+
+ for (const package_key& d: cfg.dependencies)
+ {
+ build_package* p (entered_build (d));
+
+ // Must be collected at least non-recursively.
+ //
+ assert (p != nullptr);
+
+ if (p->recursive_collection)
+ continue;
+
+ bool add_deps_trace (verb >= 5);
+
+ for (existing_dependent& ed:
+ query_existing_dependents (trace,
+ options,
+ d.db,
+ d.name,
+ false /* exclude_optional */,
+ fdb,
+ rpt_depts,
+ replaced_vers))
+ {
+ if (add_deps_trace)
+ {
+ deps_trace += p->available_name_version_db () + ' ';
+
+ // Make sure the dependency is only listed once in the
+ // trace record.
+ //
+ add_deps_trace = false;
+ }
+
+ // Add the existing dependent to the list, suppressing
+ // duplicates.
+ //
+ if (find_if (depts.begin (), depts.end (),
+ [&ed] (const existing_dependent& d)
+ {
+ return d.selected->name == ed.selected->name &&
+ d.db == ed.db;
+ }) == depts.end ())
+ {
+ depts.push_back (move (ed));
+ }
+ }
+ }
+
+ if (!depts.empty ())
+ {
+ l5 ([&]{trace << "cfg-postponing dependent "
+ << pkg.available_name_version_db ()
+ << " adds not (being) collected dependencies "
+ << deps_trace << "with not (being) collected "
+ << "existing dependents to (being) negotiated "
+ << "cluster and results in " << cfg
+ << ", throwing recollect_existing_dependents";});
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw recollect_existing_dependents {cfg.depth, move (depts)};
+ }
+
+ // Up-negotiate the configuration and if it has changed, throw
+ // retry_configuration to the try/catch frame corresponding to
+ // the negotiation of the outermost merged cluster in order to
+ // retry the same steps (potentially refining the configuration
+ // as we go along) and likely (but not necessarily) ending up
+ // here again, at which point we up-negotiate again with the
+ // expectation that the configuration won't change (but if it
+ // does, then we throw again and do another refinement pass).
+ //
+ // In a sense, semantically, we should act like a one more
+ // iteration of the initial negotiation loop with the exception
+ // acting like a request to restart the refinement process from
+ // the beginning.
+ //
+ bool changed;
+ {
+ // Similar to initial negotiation, resolve package skeletons
+ // for this dependent and its dependencies.
+ //
+ assert (pkg.skeleton);
+ package_skeleton& dept (*pkg.skeleton);
+
+ // If a dependency has already been recursively collected,
+ // then we can no longer call reload_defaults() or
+ // verify_sensible() on its skeleton. We could reset it, but
+ // then we wouldn't be able to continue using it if
+ // negotiate_configuration() below returns false. So it seems
+ // the most sensible approach is to make a temporary copy and
+ // reset that (see the similar code in
+ // collect_build_postponed()).
+ //
+ small_vector<reference_wrapper<package_skeleton>, 1> depcs;
+ forward_list<package_skeleton> depcs_storage; // Ref stability.
+ {
+ depcs.reserve (cfg_deps.size ());
+ for (const package_key& pk: cfg_deps)
+ {
+ build_package* b (entered_build (pk));
+ assert (b != nullptr);
+
+ optional<package_skeleton>& ps (b->skeleton);
+
+ // If the dependency's skeleton is already present, then
+ // this dependency's configuration has already been
+ // initially negotiated (see collect_build_postponed() for
+                  // details) and will now be up-negotiated. Thus, in
+ // particular, the skeleton must not have the old
+ // configuration dependent variables be loaded.
+ //
+ assert (!ps ||
+ (ps->load_config_flags &
+ package_skeleton::load_config_dependent) == 0);
+
+ package_skeleton* depc;
+ if (b->recursive_collection)
+ {
+ assert (ps);
+
+ depcs_storage.push_front (*ps);
+ depc = &depcs_storage.front ();
+ depc->reset ();
+ }
+ else
+ depc = &(ps
+ ? *ps
+ : b->init_skeleton (options,
+ false /* load_old_dependent_config */));
+
+ depcs.push_back (*depc);
+ }
+ }
+
+ optional<bool> c (
+ negotiate_configuration (
+ cfg.dependency_configurations, dept, dp, depcs, has_alt));
+
+ // If the dependency alternative configuration cannot be
+ // negotiated for this dependent, then add an entry to
+ // unacceptable_alts and throw unaccept_alternative to
+ // recollect from scratch.
+ //
+ if (!c)
+ {
+ unacceptable_alts.emplace (pk, pkg.available->version, dp);
+
+ l5 ([&]{trace << "unable to cfg-negotiate dependency "
+ << "alternative " << dp.first << ','
+ << dp.second << " for dependent "
+ << pkg.available_name_version_db ()
+ << ", throwing unaccept_alternative";});
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw unaccept_alternative ();
+ }
+ else
+ changed = *c;
+ }
+
+ // If the configuration hasn't changed, then we carry on.
+ // Otherwise, retry the negotiation from the beginning to refine
+ // the resulting configuration (see the catch block for
+ // retry_configuration).
+ //
+ if (changed)
+ {
+ l5 ([&]{trace << "cfg-postponing dependent "
+ << pkg.available_name_version_db ()
+ << " involves (being) negotiated configurations "
+ << "and results in " << cfg
+ << ", throwing retry_configuration";});
+
+ // Don't print the "while satisfying..." chain.
+ //
+ dep_chain.clear ();
+
+ throw retry_configuration {cfg.depth, move (pk)};
+ }
+
+ l5 ([&]{trace << "configuration for cfg-postponed "
+ << "dependencies of dependent "
+ << pkg.available_name_version_db () << " is "
+ << (r.second ? "" : "shadow-") << "negotiated";});
+
+ // Note that even in the fully negotiated case we may still add
+ // extra dependencies to this cluster which we still need to
+ // configure and recursively collect before indicating to the
+ // caller (returning true) that we are done with this depends
+ // value and the dependent is not postponed.
+ //
+ for (const package_key& p: cfg_deps)
+ {
+ build_package* b (entered_build (p));
+ assert (b != nullptr);
+
+ assert (b->skeleton); // Should have been init'ed above.
+
+ package_skeleton& ps (*b->skeleton);
+
+ if (!b->recursive_collection)
+ {
+ l5 ([&]{trace << "collecting cfg-postponed dependency "
+ << b->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ();});
+
+ // Similar to the inital negotiation case, verify and set
+ // the dependent configuration for this dependency.
+ //
+ {
+ const package_configuration& pc (
+ cfg.dependency_configurations[p]);
+
+ pair<bool, string> pr (ps.available != nullptr
+ ? ps.verify_sensible (pc)
+ : make_pair (true, string ()));
+
+ if (!pr.first)
+ {
+ diag_record dr (fail);
+ dr << "unable to negotiate sensible configuration for "
+ << "dependency " << p << '\n'
+ << " " << pr.second;
+
+ dr << info << "negotiated configuration:\n";
+ pc.print (dr, " ");
+ }
+
+ ps.dependent_config (pc);
+ }
+
+ collect_build_prerequisites (options,
+ *b,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+ else
+ l5 ([&]{trace << "dependency "
+ << b->available_name_version_db ()
+ << " of dependent "
+ << pkg.available_name_version_db ()
+ << " is already (being) recursively "
+ << "collected, skipping";});
+
+ // Unless the dependency collection has been postponed or it
+ // is already being reconfigured, reconfigure it if its
+ // configuration changes.
+ //
+ if (!b->recursive_collection_postponed () && !b->reconfigure ())
+ {
+ const shared_ptr<selected_package>& sp (b->selected);
+
+ if (sp != nullptr &&
+ sp->state == package_state::configured &&
+ sp->config_checksum != ps.config_checksum ())
+ {
+ b->flags |= build_package::adjust_reconfigure;
+ }
+ }
+ }
+
+ return true;
+ }
+ }
+
+ return true;
+ };
+
+ // Select a dependency alternative, copying it alone into the resulting
+ // dependencies list and evaluating its reflect clause, if present. In
+ // the pre-reevaluation mode update the variable prefixes list, if the
+ // selected alternative has config clause, and the pre-reevaluation
+ // resulting information (re-evaluation position, etc).
+ //
+ // Note that prebuilds are only used in the pre-reevaluation mode.
+ //
+ bool selected (false);
+ auto select = [&sdeps, &salts, &sdas,
+ &skel,
+ di,
+ &selected,
+ pre_reeval, &banned_var_prefixes, &references_banned_var,
+ &orig_dep,
+ &r] (const dependency_alternative& da,
+ size_t dai,
+ prebuilds&& pbs)
+ {
+ assert (sdas.empty ());
+
+ if (pre_reeval)
+ {
+ pair<size_t, size_t> pos (di + 1, dai + 1);
+
+ bool contains_orig_dep (
+ find_if (pbs.begin (), pbs.end (),
+ [&orig_dep] (const prebuild& pb)
+ {
+ return pb.dependency.name == orig_dep->name &&
+ pb.db == orig_dep->db;
+ }) != pbs.end ());
+
+ // If the selected alternative contains the originating dependency,
+ // then set the originating dependency position, unless it is
+ // already set (note that the same dependency package may
+ // potentially be specified in multiple depends clauses).
+ //
+ if (contains_orig_dep && r.originating_dependency_position.first == 0)
+ r.originating_dependency_position = pos;
+
+ if (da.prefer || da.require)
+ {
+ if (contains_orig_dep)
+ r.reevaluation_optional = false;
+
+ // If this is the first selected alternative with the config
+ // clauses, then save its position and the dependency packages.
+ //
+ if (r.reevaluation_position.first == 0)
+ {
+ r.reevaluation_position = pos;
+
+ for (prebuild& pb: pbs)
+ r.reevaluation_dependencies.emplace_back (
+ pb.db, move (pb.dependency.name));
+ }
+
+ // Save the variable prefixes for the selected alternative
+ // dependencies, if we still track them.
+ //
+ if (r.reevaluation_optional)
+ {
+ for (const dependency& d: da)
+ banned_var_prefixes.push_back (
+ "config." + d.name.variable () + '.');
+ }
+ }
+ }
+
+ // Avoid copying enable/reflect not to evaluate them repeatedly.
+ //
+ sdas.emplace_back (nullopt /* enable */,
+ nullopt /* reflect */,
+ da.prefer,
+ da.accept,
+ da.require,
+ da /* dependencies */);
+
+ sdeps.push_back (move (sdas));
+ salts.push_back (dai);
+
+ if (da.reflect)
+ {
+ if (pre_reeval &&
+ r.reevaluation_optional &&
+ references_banned_var (*da.reflect))
+ {
+ r.reevaluation_optional = false;
+ }
+
+ skel.evaluate_reflect (*da.reflect, make_pair (di, dai));
+ }
+
+ selected = true;
+ };
+
+ // Postpone the prerequisite builds collection, optionally inserting the
+ // package to the postponements set (can potentially already be there)
+ // and saving the enabled alternatives.
+ //
+ auto postpone = [&pkg, &edas, &postponed] (postponed_packages* postpones)
+ {
+ if (postpones != nullptr)
+ postpones->insert (&pkg);
+
+ pkg.postponed_dependency_alternatives = move (edas);
+ postponed = true;
+ };
+
+ // Iterate over the enabled dependencies and try to select a
+ // satisfactory alternative.
+ //
+ // If the package is already configured as source and is not
+ // up/downgraded, then we will try to resolve its dependencies to the
+ // current prerequisites. To achieve this we will first try to select an
+ // alternative in the "recreate dependency decisions" mode, filtering
+ // out all the alternatives where dependencies do not all belong to the
+ // list of current prerequisites. If we end up with no alternative
+ // selected, then we retry in the "make dependency decisions" mode and
+ // select the alternative ignoring the current prerequisites.
+ //
+ // Note though, that if we are re-evaluating an existing dependent
+ // then we fail if we didn't succeed in the "recreate dependency
+ // decisions" mode.
+ //
+ const package_prerequisites* prereqs (src_conf && !ud
+ ? &sp->prerequisites
+ : nullptr);
+
+ // During the dependent (pre-)re-evaluation we always try to reproduce
+ // the existing setup.
+ //
+ assert ((!reeval && !pre_reeval) || prereqs != nullptr);
+
+ // Initially try to select an alternative checking that all the
+ // constraints imposed by the being built dependents of the dependencies
+ // in the alternative are satisfied. Failed that, re-try but this time
+ // disable this check so that the unsatisfactory dependency can be
+ // properly handled by collect_build() (which can fail, postpone
+ // failure, etc; see its implementation for details).
+ //
+ bool check_constraints (true);
+
+ // Initially don't ignore the unsatisfactory user-specified dependency
+ // specs, considering the dependency alternative as unsatisfactory if
+ // there are any. Failed that, re-try but this time ignore such specs,
+ // so that the unsatisfactory dependency can later be handled by
+ // collect_build() (which can fail, postpone failure, etc; see its
+ // implementation for details).
+ //
+ // The thinking here is that we don't ignore the unsatisfactory
+ // dependency specs initially to skip the alternatives which are
+ // unresolvable for that reason and prefer alternatives which satisfy
+ // the command line constraints.
+ //
+ bool ignore_unsatisfactory_dep_spec (false);
+
+ for (bool unacceptable (false);;)
+ {
+ // The index and pre-collection result of the first satisfactory
+ // alternative.
+ //
+ optional<pair<size_t, precollect_result>> first_alt;
+
+ // The number of satisfactory alternatives.
+ //
+ size_t alts_num (0);
+
+ // If true, then only reused alternatives will be considered for the
+ // selection.
+ //
+ // The idea here is that we don't want to bloat the configuration by
+ // silently configuring a new dependency package as the alternative
+ // for an already used but not satisfactory for all the dependents
+ // dependency. Think of silently configuring Qt6 just because the
+ // configured version of Qt5 is not satisfactory for all the
+ // dependents. The user must have a choice if to either configure this
+ // new dependency by specifying it explicitly or, for example, to
+ // upgrade dependents so that the existing dependency is satisfactory
+ // for all of them.
+ //
+ // Note that if there are multiple alternatives with all their
+ // dependencies resolved/satisfied, then only reused alternatives are
+ // considered anyway. Thus, this flag only affects the single
+ // alternative case.
+ //
+ bool reused_only (false);
+
+ // If true, then some alternatives with unsatisfactory dependencies
+ // are detected and, unless the alternative is selected or the
+ // selection is postponed, we should re-try with the constraints check
+ // disabled (see above for details).
+ //
+ bool unsatisfactory (false);
+
+ for (size_t i (0); i != edas.size (); ++i)
+ {
+ // Skip the unacceptable alternatives.
+ //
+ {
+ // Convert to 1-base.
+ //
+ pair<size_t, size_t> pos (di + 1, edas[i].second + 1);
+
+ if (unacceptable_alts.find (
+ unacceptable_alternative (pk, ap->version, pos)) !=
+ unacceptable_alts.end ())
+ {
+ unacceptable = true;
+
+ l5 ([&]{trace << "dependency alternative " << pos.first << ','
+ << pos.second << " for dependent "
+ << pkg.available_name_version_db ()
+ << " is unacceptable, skipping";});
+
+ continue;
+ }
+ }
+
+ const dependency_alternative& da (edas[i].first);
+
+ precollect_result pcr (
+ precollect (da,
+ das.buildtime,
+ prereqs,
+ check_constraints,
+ ignore_unsatisfactory_dep_spec));
+
+ // If we didn't come up with satisfactory dependency builds, then
+ // skip this alternative and try the next one, unless the collecting
+ // is postponed in which case just bail out.
+ //
+ // Should we skip alternatives for which we are unable to satisfy
+ // the constraint? On one hand, this could be a user error: there is
+ // no package available from dependent's repositories that satisfies
+ // the constraint. On the other hand, it could be that it's other
+ // dependent's constraints that we cannot satisfy together with
+ // others. And in this case we may want some other
+ // alternative. Consider, as an example, something like this:
+ //
+ // depends: libfoo >= 2.0.0 | {libfoo >= 1.0.0 libbar}
+ //
+ if (!pcr.builds)
+ {
+ if (pcr.repo_postpone)
+ {
+ if (reeval)
+ fail_reeval ();
+
+ postpone (nullptr); // Already inserted into postponed_repo.
+ break;
+ }
+
+ // If this alternative is reused but is not satisfactory, then
+ // switch to the reused-only mode.
+ //
+ if (pcr.unsatisfactory)
+ {
+ unsatisfactory = true;
+
+ if (pcr.reused)
+ reused_only = true;
+ }
+
+ continue;
+ }
+
+ ++alts_num;
+
+ // Note that when we see the first satisfactory alternative, we
+ // don't know yet if it is a single alternative or the first of the
+ // (multiple) true alternatives (those are handled differently).
+ // Thus, we postpone its processing until the second satisfactory
+ // alternative is encountered or the end of the alternatives list is
+ // reached.
+ //
+ if (!first_alt)
+ {
+ first_alt = make_pair (i, move (pcr));
+ continue;
+ }
+
+ // Try to collect and then select a true alternative, returning true
+ // if the alternative is selected or the collection is postponed.
+ // Return false if the alternative is ignored (not postponed and not
+ // all of it dependencies are reused).
+ //
+ auto try_select = [postponed_alts, &max_alt_index,
+ &edas, &pkg,
+ di,
+ &prereqs,
+ &check_constraints,
+ &ignore_unsatisfactory_dep_spec,
+ pre_reeval,
+ reeval,
+ &trace,
+ &postpone,
+ &collect,
+ &select] (size_t index, precollect_result&& pcr)
+ {
+ const auto& eda (edas[index]);
+ const dependency_alternative& da (eda.first);
+ size_t dai (eda.second);
+
+ // Postpone the collection if the alternatives maximum index is
+ // reached.
+ //
+ if (postponed_alts != nullptr && index >= max_alt_index)
+ {
+ // For a dependent re-evaluation max_alt_index is expected to be
+ // max size_t.
+ //
+ assert (!reeval);
+
+ l5 ([&]{trace << "alt-postpone dependent "
+ << pkg.available_name_version_db ()
+ << " since max index is reached: " << index <<
+ info << "dependency alternative: " << da;});
+
+ postpone (postponed_alts);
+ return true;
+ }
+
+ // Select this alternative if all its dependencies are reused and
+ // do nothing about it otherwise.
+ //
+ if (pcr.reused)
+ {
+ // On the diagnostics run there shouldn't be any alternatives
+ // that we could potentially select.
+ //
+ assert (postponed_alts != nullptr);
+
+ if (pre_reeval)
+ {
+ size_t ni (dai + 1);
+ size_t oi (pkg.selected->dependency_alternatives[di]);
+
+ if (ni != oi)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated at depends clause " << di + 1
+ << ": selected alternative changed to " << ni
+ << " from " << oi;});
+
+ throw reevaluation_deviated ();
+ }
+ }
+ else if (!collect (da,
+ dai,
+ move (*pcr.builds),
+ prereqs,
+ check_constraints,
+ ignore_unsatisfactory_dep_spec))
+ {
+ postpone (nullptr); // Already inserted into postponed_cfgs.
+ return true;
+ }
+
+ select (da, dai, move (*pcr.builds));
+
+ // Make sure no more true alternatives are selected during this
+ // function call unless we are (pre-)reevaluating a dependent.
+ //
+ if (!reeval && !pre_reeval)
+ max_alt_index = 0;
+
+ return true;
+ }
+ else
+ return false;
+ };
+
+ // If we encountered the second satisfactory alternative, then this
+ // is the "multiple true alternatives" case. In this case we also
+ // need to process the first satisfactory alternative, which
+ // processing was delayed.
+ //
+ if (alts_num == 2)
+ {
+ assert (first_alt);
+
+ if (try_select (first_alt->first, move (first_alt->second)))
+ break;
+ }
+
+ if (try_select (i, move (pcr)))
+ break;
+
+ // Not all of the alternative dependencies are reused, so go to
+ // the next alternative.
+ }
+
+ // Bail out if the collection is postponed for any reason.
+ //
+ if (postponed)
+ break;
+
+ // Select the single satisfactory alternative if it is reused or we
+ // are not in the reused-only mode.
+ //
+ if (!selected && alts_num == 1)
+ {
+ assert (first_alt);
+
+ precollect_result& pcr (first_alt->second);
+
+ assert (pcr.builds);
+
+ if (pcr.reused || !reused_only)
+ {
+ // If there are any unacceptable alternatives, then the remaining
+ // one should be reused.
+ //
+ assert (!unacceptable || pcr.reused);
+
+ const auto& eda (edas[first_alt->first]);
+ const dependency_alternative& da (eda.first);
+ size_t dai (eda.second);
+
+ if (pre_reeval)
+ {
+ size_t ni (dai + 1);
+ size_t oi (sp->dependency_alternatives[di]);
+
+ if (ni != oi)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated for depends clause " << di + 1
+ << ": selected alternative (single) changed to "
+ << ni << " from " << oi;});
+
+ throw reevaluation_deviated ();
+ }
+ }
+ else if (!collect (da,
+ dai,
+ move (*pcr.builds),
+ prereqs,
+ check_constraints,
+ ignore_unsatisfactory_dep_spec))
+ {
+ postpone (nullptr); // Already inserted into postponed_cfgs.
+ break;
+ }
+
+ select (da, dai, move (*pcr.builds));
+ }
+ }
+
+ // If an alternative is selected, then we are done.
+ //
+ if (selected)
+ break;
+
+ // Fail or postpone the collection if no alternative is selected,
+ // unless we are re-evaluating a dependent or are in the "recreate
+ // dependency decisions" mode. In the latter case fail for
+ // re-evaluation and fall back to the "make dependency decisions" mode
+ // and retry otherwise.
+ //
+ if (prereqs != nullptr)
+ {
+ if (pre_reeval)
+ {
+ size_t oi (sp->dependency_alternatives[di]);
+
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated for depends clause " << di + 1
+ << ": now cannot select alternative, previously "
+ << oi << " was selected";});
+
+ throw reevaluation_deviated ();
+ }
+
+ if (reeval)
+ fail_reeval ();
+
+ prereqs = nullptr;
+ continue;
+ }
+
+ // Retry with the constraints check disabled, if an alternative with
+ // the unsatisfactory dependencies is detected.
+ //
+ if (check_constraints && unsatisfactory)
+ {
+ check_constraints = false;
+ continue;
+ }
+
+ if (!ignore_unsatisfactory_dep_spec)
+ {
+ ignore_unsatisfactory_dep_spec = true;
+ continue;
+ }
+
+ // Otherwise we would have thrown/failed earlier.
+ //
+ assert (!pre_reeval && !reeval);
+
+ // We shouldn't end up with the "no alternative to select" case if any
+ // alternatives are unacceptable.
+ //
+ assert (!unacceptable);
+
+ // Issue diagnostics and fail if there are no satisfactory
+ // alternatives.
+ //
+ if (alts_num == 0)
+ {
+ diag_record dr;
+ for (const auto& da: edas)
+ {
+ precollect (da.first,
+ das.buildtime,
+ nullptr /* prereqs */,
+ true /* check_constraints */,
+ false /* ignore_unsatisfactory_dep_spec */,
+ &dr);
+ }
+
+ assert (!dr.empty ());
+
+ dr.flush ();
+ throw failed ();
+ }
+
+ // Issue diagnostics and fail if there are multiple non-reused
+ // alternatives or there is a single non-reused alternative in the
+ // reused-only mode, unless the failure needs to be postponed.
+ //
+ assert (alts_num > (!reused_only ? 1 : 0));
+
+ if (postponed_alts != nullptr)
+ {
+ if (verb >= 5)
+ {
+ diag_record dr (trace);
+ dr << "alt-postpone dependent "
+ << pkg.available_name_version_db ()
+ << " due to ambiguous alternatives";
+
+ for (const auto& da: edas)
+ dr << info << "alternative: " << da.first;
+ }
+
+ postpone (postponed_alts);
+ break;
+ }
+
+ diag_record dr (fail);
+ dr << "unable to select dependency alternative for package "
+ << pkg.available_name_version_db () <<
+ info << "explicitly specify dependency packages to manually "
+ << "select the alternative";
+
+ for (const auto& da: edas)
+ {
+ // Note that we pass false as the check_constraints argument to make
+ // sure that the alternatives are always saved into
+ // precollect_result::builds rather than into
+ // precollect_result::unsatisfactory.
+ //
+ precollect_result r (
+ precollect (da.first,
+ das.buildtime,
+ nullptr /* prereqs */,
+ false /* check_constraints */,
+ true /* ignore_unsatisfactory_dep_spec */));
+
+ if (r.builds)
+ {
+ assert (!r.reused); // We shouldn't be failing otherwise.
+
+ dr << info << "alternative:";
+
+ // Only print the non-reused dependencies, which needs to be
+ // explicitly specified by the user.
+ //
+ for (const prebuild& b: *r.builds)
+ {
+ if (!b.reused)
+ dr << ' ' << b.dependency.name;
+ }
+ }
+ }
+
+ // If there is only a single alternative (while we are in the
+ // reused-only mode), then also print the reused unsatisfactory
+ // alternatives and the reasons why they are not satisfactory.
+ //
+ if (alts_num == 1)
+ {
+ assert (reused_only);
+
+ for (const auto& da: edas)
+ {
+ precollect_result r (
+ precollect (da.first,
+ das.buildtime,
+ nullptr /* prereqs */,
+ true /* check_constraints */,
+ false /* ignore_unsatisfactory_dep_spec */));
+
+ if (r.reused && r.unsatisfactory)
+ {
+ // Print the alternative.
+ //
+ dr << info << "unsatisfactory alternative:";
+
+ for (const prebuild& b: *r.unsatisfactory)
+ dr << ' ' << b.dependency.name;
+
+ // Print the reason.
+ //
+ precollect (da.first,
+ das.buildtime,
+ nullptr /* prereqs */,
+ true /* check_constraints */,
+ false /* ignore_unsatisfactory_dep_spec */,
+ &dr);
+ }
+ }
+ }
+ }
+
+ // Bail out if the collection is postponed.
+ //
+ // Note that it's tempting to also bail out in the pre-reevaluation mode
+ // if we have already collected all the required resulting information
+ // (reevaluation position, originating dependency position, etc).
+ // However, in this case we may not detect the dependent deviation and
+ // thus we always iterate through all the depends clauses.
+ //
+ if (postponed)
+ break;
+ }
+
+ if (reeval)
+ {
+ if (!reevaluated)
+ fail_reeval ();
+
+ assert (postponed);
+ }
+
+ if (pre_reeval)
+ {
+ // It doesn't feel like it may happen in the pre-reevaluation mode. If
+ // it still happens, maybe due to some manual tampering, let's assume
+ // this as a deviation case.
+ //
+ if (r.originating_dependency_position.first == 0)
+ {
+ l5 ([&]{trace << "re-evaluation of dependent "
+ << pkg.available_name_version_db ()
+ << " deviated: previously selected dependency "
+ << *orig_dep << " is not selected anymore";});
+
+ throw reevaluation_deviated ();
+ }
+
+ l5 ([&]
+ {
+ diag_record dr (trace);
+ dr << "pre-reevaluated " << pkg.available_name_version_db ()
+ << ": ";
+
+ pair<size_t, size_t> pos (r.reevaluation_position);
+
+ if (pos.first != 0)
+ {
+ dr << pos.first << ',' << pos.second;
+
+ if (r.reevaluation_optional)
+ dr << " re-evaluation is optional";
+ }
+ else
+ dr << "end reached";
+ });
+ }
+ else
+ {
+ dep_chain.pop_back ();
+
+ l5 ([&]{trace << (!postponed ? "end " :
+ reeval ? "re-evaluated " :
+ "postpone ")
+ << pkg.available_name_version_db ();});
+ }
+
+ return pre_reeval && r.reevaluation_position.first != 0
+ ? move (r)
+ : optional<pre_reevaluate_result> ();
+ }
+
+ // Recursively collect prerequisites for the named package in the
+ // specified configuration (database). This is the by-name entry point:
+ // the package must have already been entered into the map (see
+ // collect_build()); it locates the build_package object and delegates
+ // to the main overload, starting a fresh dependency chain.
+ //
+ void build_packages::
+ collect_build_prerequisites (const pkg_build_options& o,
+ database& db,
+ const package_name& name,
+ const function<find_database_function>& fdb,
+ const function<add_priv_cfg_function>& apc,
+ const repointed_dependents& rpt_depts,
+ replaced_versions& replaced_vers,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ size_t max_alt_index,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies& postponed_edeps,
+ postponed_dependencies& postponed_deps,
+ postponed_configurations& postponed_cfgs,
+ unacceptable_alternatives& unacceptable_alts,
+ unsatisfied_dependents& unsatisfied_depts)
+ {
+ // The package is expected to have been entered into the map already.
+ //
+ auto mi (map_.find (db, name));
+ assert (mi != map_.end ());
+
+ // Start a new dependency chain for this top-level collection.
+ //
+ build_package_refs dep_chain;
+
+ // Note: postponed_repo and postponed_alts are passed by address
+ // (non-NULL), which permits the main overload to postpone the
+ // collection (it checks these pointers for NULL).
+ //
+ collect_build_prerequisites (o,
+ mi->second.package,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &postponed_repo,
+ &postponed_alts,
+ max_alt_index,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+
+ // Collect the dependents whose prerequisites are being replaced
+ // (repointed), scheduling each for reconfiguration and, where
+ // possible, collecting it recursively so that the replacement
+ // prerequisites get resolved.
+ //
+ void build_packages::
+ collect_repointed_dependents (
+ const pkg_build_options& o,
+ const repointed_dependents& rpt_depts,
+ replaced_versions& replaced_vers,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies& postponed_edeps,
+ postponed_dependencies& postponed_deps,
+ postponed_configurations& postponed_cfgs,
+ unacceptable_alternatives& unacceptable_alts,
+ unsatisfied_dependents& unsatisfied_depts,
+ const function<find_database_function>& fdb,
+ const function<add_priv_cfg_function>& apc)
+ {
+ tracer trace ("collect_repointed_dependents");
+
+ for (const auto& rd: rpt_depts)
+ {
+ database& db (rd.first.db);
+ const package_name& nm (rd.first.name);
+
+ // If the dependent is already in the map with a non-adjustment
+ // action (or is just pre-entered), then make sure it will be
+ // reconfigured (unless it is being dropped or is already set to be
+ // reconfigured) and skip it. Only a pure adjustment entry falls
+ // through to be replaced with the repoint build below.
+ //
+ {
+ auto i (map_.find (db, nm));
+ if (i != map_.end ())
+ {
+ build_package& b (i->second.package);
+
+ if (!b.action || *b.action != build_package::adjust)
+ {
+ if (!b.action ||
+ (*b.action != build_package::drop && !b.reconfigure ()))
+ b.flags |= build_package::adjust_reconfigure;
+
+ continue;
+ }
+ }
+ }
+
+ shared_ptr<selected_package> sp (db.load<selected_package> (nm));
+
+ // The repointed dependent can be an orphan, so just create the
+ // available package from the selected package.
+ //
+ auto rp (make_available_fragment (o, db, sp));
+
+ // Add the prerequisite replacements as the required-by packages.
+ //
+ set<package_version_key> required_by;
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const package_key& pk (prq.first);
+
+ // Note that the dependency can potentially be just pre-entered, in
+ // which case its version is not known at this point.
+ //
+ assert (entered_build (pk) != nullptr);
+
+ required_by.emplace (pk.db, pk.name, version ());
+ }
+ }
+
+ build_package p {
+ build_package::build,
+ db,
+ sp,
+ move (rp.first),
+ move (rp.second),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ sp->system (),
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ move (required_by), // Required by (dependencies).
+ false, // Required by dependents.
+ build_package::adjust_reconfigure | build_package::build_repoint};
+
+ package_key pk {db, nm};
+
+ // Note that the repointed dependent can well be a dependency whose
+ // recursive processing should be postponed.
+ //
+ auto i (postponed_deps.find (pk));
+ if (i != postponed_deps.end ())
+ {
+ // Note that here we would collect the repointed dependent recursively
+ // without specifying any configuration for it.
+ //
+ i->second.wout_config = true;
+
+ // Note: not recursive.
+ //
+ collect_build (
+ o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone repointed dependent " << pk;});
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ // Note: not recursive.
+ //
+ collect_build (
+ o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone repointed dependent " << pk
+ << " since already in cluster " << *pcfg;});
+ }
+ else
+ {
+ // Note: the dependency chain is only needed for the recursive
+ // collection (the dead outer declaration that used to shadow
+ // this one has been removed).
+ //
+ build_package_refs dep_chain;
+
+ // Note: recursive.
+ //
+ collect_build (o,
+ move (p),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts,
+ &dep_chain,
+ fdb,
+ apc,
+ &rpt_depts,
+ &postponed_repo,
+ &postponed_alts,
+ &postponed_recs,
+ &postponed_edeps,
+ &postponed_deps,
+ &unacceptable_alts);
+ }
+ }
+ }
+ }
+
+ // Collect the drop (removal) of the specified selected package,
+ // overwriting any existing map entry for it where permitted. May throw
+ // replace_version after recording an entry in replaced_vers, which
+ // presumably requests the plan to be re-collected from scratch (see
+ // replaced_versions for details -- TODO confirm against its definition).
+ //
+ void build_packages::
+ collect_drop (const pkg_build_options&,
+ database& db,
+ shared_ptr<selected_package> sp,
+ replaced_versions& replaced_vers)
+ {
+ tracer trace ("collect_drop");
+
+ package_key pk (db, sp->name);
+
+ // If there is an entry for building specific version of the package (the
+ // available member is not NULL), then it wasn't created to prevent our
+ // drop (see replaced_versions for details). This rather means that the
+ // replacement version is not being built anymore due to the plan
+ // refinement. Thus, just erase the entry in this case and continue.
+ //
+ auto vi (replaced_vers.find (pk));
+ if (vi != replaced_vers.end () && !vi->second.replaced)
+ {
+ replaced_version& v (vi->second);
+ const shared_ptr<available_package>& ap (v.available);
+
+ if (ap != nullptr)
+ {
+ // Only compute the version string if we are actually going to
+ // trace it (verbosity level 5 or higher).
+ //
+ if (verb >= 5)
+ {
+ bool s (v.system);
+ const version& av (s ? *ap->system_version (db) : ap->version);
+
+ l5 ([&]{trace << "erase version replacement for "
+ << package_string (ap->id.name, av, s) << db;});
+ }
+
+ replaced_vers.erase (vi);
+ vi = replaced_vers.end (); // Keep it valid for the below check.
+ }
+ else
+ v.replaced = true;
+ }
+
+ // The drop entry which will either be inserted into the map or will
+ // overwrite an existing entry below. Note: positional aggregate
+ // initialization; the field order must match build_package.
+ //
+ build_package p {
+ build_package::drop,
+ db,
+ move (sp),
+ nullptr,
+ nullptr,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ false, // System package.
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ {}, // Required by.
+ false, // Required by dependents.
+ 0}; // State flags.
+
+ auto i (map_.find (pk));
+
+ if (i != map_.end ())
+ {
+ build_package& bp (i->second.package);
+
+ // Don't overwrite the build object if its required-by package names
+ // have the "required by dependents" semantics.
+ //
+ if (!bp.required_by_dependents)
+ {
+ if (bp.available != nullptr)
+ {
+ // Similar to the version replacement in collect_build(), see if
+ // in-place drop is possible (no dependencies, etc) and set scratch
+ // to false if that's the case.
+ //
+ bool scratch (true);
+
+ // While checking if the package has any dependencies skip the
+ // toolchain build-time dependencies since they should be quite
+ // common.
+ //
+ // An update: it turned out that just absence of dependencies is not
+ // the only condition that causes a package to be dropped in place.
+ // The following conditions must also be met:
+ //
+ // - The package must also not participate in any configuration
+ // negotiation on the dependency side (otherwise it could have
+ // been added to a cluster as a dependency).
+ //
+ // - The package must not be added to unsatisfied_depts on the
+ // dependency side.
+ //
+ // This feels quite hairy at the moment, so we won't be dropping in
+ // place for now.
+ //
+#if 0
+ if (!has_dependencies (options, bp.available->dependencies))
+ scratch = false;
+#endif
+
+ l5 ([&]{trace << bp.available_name_version_db ()
+ << " package version needs to be replaced "
+ << (!scratch ? "in-place " : "") << "with drop";});
+
+ // Record the version replacement (reusing the entry erased above,
+ // if still present) and restart the collection.
+ //
+ if (scratch)
+ {
+ if (vi != replaced_vers.end ())
+ vi->second = replaced_version ();
+ else
+ replaced_vers.emplace (move (pk), replaced_version ());
+
+ throw replace_version ();
+ }
+ }
+
+ // Overwrite the existing (possibly pre-entered, adjustment, or
+ // repoint) entry.
+ //
+ l4 ([&]{trace << "overwrite " << pk;});
+
+ bp = move (p);
+ }
+ else
+ {
+ // The package is required by some dependents, so cannot be
+ // dropped; just trace the reason.
+ //
+ assert (!bp.required_by.empty ());
+
+ l5 ([&]
+ {
+ diag_record dr (trace);
+ dr << pk << " cannot be dropped since it is required by ";
+ for (auto b (bp.required_by.begin ()), i (b);
+ i != bp.required_by.end ();
+ ++i)
+ dr << (i != b ? ", " : "") << *i;
+ });
+ }
+ }
+ else
+ {
+ l4 ([&]{trace << "add " << pk;});
+
+ map_.emplace (move (pk), data_type {end (), move (p)});
+ }
+ }
+
+ // Schedule the specified selected package for unholding. If its map
+ // entry is just pre-entered (no action yet), replace it with an
+ // adjustment build carrying the adjust_unhold flag, merging the
+ // pre-entered state into it; otherwise just add the flag to the
+ // existing entry.
+ //
+ void build_packages::
+ collect_unhold (database& db, const shared_ptr<selected_package>& sp)
+ {
+ auto i (map_.find (db, sp->name));
+
+ // Currently, it must always be pre-entered.
+ //
+ assert (i != map_.end ());
+
+ build_package& bp (i->second.package);
+
+ if (!bp.action) // Pre-entered.
+ {
+ // The adjustment entry. Note: positional aggregate initialization;
+ // the field order must match build_package.
+ //
+ build_package p {
+ build_package::adjust,
+ db,
+ sp,
+ nullptr,
+ nullptr,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ sp->system (),
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ {}, // Required by.
+ false, // Required by dependents.
+ build_package::adjust_unhold};
+
+ // Preserve whatever was recorded in the pre-entered entry (see
+ // build_package::merge() for the exact semantics).
+ //
+ p.merge (move (bp));
+ bp = move (p);
+ }
+ else
+ bp.flags |= build_package::adjust_unhold;
+ }
+
+ void build_packages::
+ collect_build_postponed (const pkg_build_options& o,
+ replaced_versions& replaced_vers,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies& postponed_edeps,
+ postponed_dependencies& postponed_deps,
+ postponed_configurations& postponed_cfgs,
+ strings& postponed_cfgs_history,
+ unacceptable_alternatives& unacceptable_alts,
+ unsatisfied_dependents& unsatisfied_depts,
+ const function<find_database_function>& fdb,
+ const repointed_dependents& rpt_depts,
+ const function<add_priv_cfg_function>& apc,
+ postponed_configuration* pcfg)
+ {
+ // NOTE: enable and run the tests with the config.bpkg.tests.all=true
+ // variable if changing anything in this function.
+ //
+
+ // Snapshot of the package builds collection state.
+ //
+ // Note: should not include postponed_cfgs_history.
+ //
+ class snapshot
+ {
+ public:
+ snapshot (const build_packages& pkgs,
+ const postponed_packages& postponed_repo,
+ const postponed_packages& postponed_alts,
+ const postponed_packages& postponed_recs,
+ const replaced_versions& replaced_vers,
+ const postponed_existing_dependencies& postponed_edeps,
+ const postponed_dependencies& postponed_deps,
+ const postponed_configurations& postponed_cfgs,
+ const unsatisfied_dependents& unsatisfied_depts)
+ : pkgs_ (pkgs),
+ replaced_vers_ (replaced_vers),
+ postponed_edeps_ (postponed_edeps),
+ postponed_deps_ (postponed_deps),
+ postponed_cfgs_ (postponed_cfgs),
+ unsatisfied_depts_ (unsatisfied_depts)
+ {
+ auto save = [] (vector<package_key>& d, const postponed_packages& s)
+ {
+ d.reserve (s.size ());
+
+ for (const build_package* p: s)
+ d.emplace_back (p->db, p->name ());
+ };
+
+ save (postponed_repo_, postponed_repo);
+ save (postponed_alts_, postponed_alts);
+ save (postponed_recs_, postponed_recs);
+ }
+
+ void
+ restore (build_packages& pkgs,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ postponed_packages& postponed_recs,
+ replaced_versions& replaced_vers,
+ postponed_existing_dependencies& postponed_edeps,
+ postponed_dependencies& postponed_deps,
+ postponed_configurations& postponed_cfgs,
+ unsatisfied_dependents& unsatisfied_depts)
+ {
+ pkgs = move (pkgs_);
+ replaced_vers = move (replaced_vers_);
+ postponed_cfgs = move (postponed_cfgs_);
+ postponed_deps = move (postponed_deps_);
+ postponed_edeps = move (postponed_edeps_);
+
+ auto restore = [&pkgs] (postponed_packages& d,
+ const vector<package_key>& s)
+ {
+ d.clear ();
+
+ for (const package_key& p: s)
+ {
+ build_package* b (pkgs.entered_build (p));
+ assert (b != nullptr);
+ d.insert (b);
+ }
+ };
+
+ restore (postponed_repo, postponed_repo_);
+ restore (postponed_alts, postponed_alts_);
+ restore (postponed_recs, postponed_recs_);
+
+ unsatisfied_depts = move (unsatisfied_depts_);
+ }
+
+ private:
+ // Note: try to use vectors instead of sets for storage to save
+ // memory. We could probably optimize this some more if necessary
+ // (there are still sets/maps inside).
+ //
+ build_packages pkgs_;
+ vector<package_key> postponed_repo_;
+ vector<package_key> postponed_alts_;
+ vector<package_key> postponed_recs_;
+ replaced_versions replaced_vers_;
+ postponed_existing_dependencies postponed_edeps_;
+ postponed_dependencies postponed_deps_;
+ postponed_configurations postponed_cfgs_;
+ unsatisfied_dependents unsatisfied_depts_;
+ };
+
+ size_t depth (pcfg != nullptr ? pcfg->depth : 0);
+
+ string t ("collect_build_postponed (" + to_string (depth) + ')');
+ tracer trace (t.c_str ());
+
+ string trace_suffix;
+ if (verb >= 5 && pcfg != nullptr)
+ {
+ trace_suffix += ' ';
+ trace_suffix += pcfg->string ();
+ }
+
+ l5 ([&]{trace << "begin" << trace_suffix;});
+
+ if (pcfg != nullptr)
+ {
+ // This is what we refer to as the "initial negotiation" where we
+ // negotiate the configuration of dependents that could be postponed.
+ // Those that could not we "up-negotiate" in the collect() lambda of
+ // collect_build_prerequisites().
+ //
+ using packages = postponed_configuration::packages;
+
+ assert (!pcfg->negotiated);
+
+ // Re-evaluate existing dependents for dependencies in this
+ // configuration cluster. Omit dependents which are already being built,
+ // dropped, or postponed.
+ //
+ // Note that the existing dependent can be re-evaluated to an earlier
+ // position than the position of the dependency which has introduced
+ // this existing dependent. Thus, re-evaluating such a dependent does
+ // not necessarily add this dependent together with the dependencies at
+ // the re-evaluation target position specifically to this cluster. We,
+ // however, re-evaluate all the discovered existing dependents. Also
+ // note that these dependents will be added to their respective clusters
+ // with the `existing` flag as a part of the dependents' re-evaluation
+ // (see the collect lambda in collect_build_prerequisites() for
+ // details).
+ //
+ // After being re-evaluated the existing dependents are recursively
+ // collected in the same way and at the same time as the new dependents
+ // of the clusters they belong to.
+ //
+ // Note that some of the postponed existing dependents may already be in
+ // the cluster. Thus, collect the postponed existing dependents to omit
+ // them from the configuration negotiation and from the subsequent
+ // recursive collection. Note that we will up-negotiate the
+ // configuration these dependents apply to their dependencies after
+ // these dependents are collected via their own dependents with the
+ // configuration clauses.
+ //
+ set<package_key> postponed_existing_dependents;
+ {
+ // Map existing dependents to the dependencies they apply a
+ // configuration to. Also, collect the information which is required
+ // for a dependent re-evaluation (selected package, etc).
+ //
+ // Note that we may end up adding additional dependencies to
+ // pcfg->dependencies which in turn may have additional existing
+ // dependents which we need to process. Feels like doing this
+ // iteratively is the best option.
+ //
+ // Also note that we need to make sure we don't re-process the same
+ // existing dependents.
+ //
+ struct existing_dependent_ex: existing_dependent
+ {
+ packages dependencies;
+ bool reevaluated = false;
+
+ existing_dependent_ex (existing_dependent&& ed)
+ : existing_dependent (move (ed)) {}
+ };
+ map<package_key, existing_dependent_ex> dependents;
+
+ const packages& deps (pcfg->dependencies);
+
+ // Note that the below collect_build_prerequisites() call can only add
+ // new dependencies to the end of the cluster's dependencies
+ // list. Thus on each iteration we will only add existing dependents
+ // of unprocessed/new dependencies. We will also skip the already
+ // re-evaluated existing dependents.
+ //
+ for (size_t i (0); i != deps.size (); )
+ {
+ size_t n (dependents.size ());
+
+ for (; i != deps.size (); ++i)
+ {
+ // Note: this reference is only used while deps is unchanged.
+ //
+ const package_key& p (deps[i]);
+
+ for (existing_dependent& ed:
+ query_existing_dependents (trace,
+ o,
+ p.db,
+ p.name,
+ false /* exclude_optional */,
+ fdb,
+ rpt_depts,
+ replaced_vers))
+ {
+ if (ed.dependency)
+ {
+ package_key pk (ed.db, ed.selected->name);
+
+ // If this dependent is present in postponed_deps or in some
+ // cluster as a dependency, then it means that someone depends
+ // on it with configuration and it's no longer considered an
+ // existing dependent (it will be reconfigured). However, this
+ // fact may not be reflected yet. And it can actually turn out
+ // bogus.
+ //
+ auto pi (postponed_deps.find (pk));
+ if (pi != postponed_deps.end ())
+ {
+ l5 ([&]{trace << "skip dep-postponed existing dependent "
+ << pk << " of dependency " << p;});
+
+ // Note that here we would re-evaluate the existing
+ // dependent without specifying any configuration for it.
+ //
+ pi->second.wout_config = true;
+
+ collect_existing_dependent (o,
+ ed,
+ {p},
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ postponed_existing_dependents.insert (pk);
+ continue;
+ }
+
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ l5 ([&]{trace << "skip existing dependent " << pk
+ << " of dependency " << p << " since "
+ << "dependent already in cluster " << *pcfg
+ << " (as a dependency)";});
+
+ postponed_existing_dependents.insert (pk);
+ continue;
+ }
+
+ auto i (dependents.find (pk));
+
+ // If the existing dependent is not in the map yet, then add
+ // it.
+ //
+ if (i == dependents.end ())
+ {
+ if (*ed.dependency != p)
+ collect_existing_dependent_dependency (o,
+ ed,
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ i = dependents.emplace (
+ move (pk), existing_dependent_ex (move (ed))).first;
+ }
+ else
+ {
+ // We always re-evaluate to the earliest position.
+ //
+ assert (i->second.dependency_position ==
+ ed.dependency_position);
+ }
+
+ // Note that we add here the dependency which introduced this
+ // existing dependent, rather than the dependency which
+ // position we re-evaluate to, and which we want to be
+ // mentioned in the plan, if printed.
+ //
+ i->second.dependencies.push_back (p);
+ }
+ else
+ {
+ l5 ([&]{trace << "schedule re-collection of deviated "
+ << "existing dependent " << *ed.selected
+ << ed.db;});
+
+ recollect_existing_dependent (o,
+ ed,
+ replaced_vers,
+ postponed_recs,
+ postponed_cfgs,
+ unsatisfied_depts,
+ true /* add_required_by */);
+ }
+ }
+ }
+
+ // Re-evaluate the newly added existing dependents, if any.
+ //
+ if (dependents.size () != n)
+ {
+ l5 ([&]{trace << "re-evaluate existing dependents for " << *pcfg;});
+
+ for (auto& d: dependents)
+ {
+ existing_dependent_ex& ed (d.second);
+
+ // Skip re-evaluated.
+ //
+ if (ed.reevaluated)
+ continue;
+
+ // Note that a re-evaluated package doesn't necessarily need to
+ // be reconfigured and thus we don't add the
+ // build_package::adjust_reconfigure flag here.
+ //
+ // Specifically, if none of its dependencies get reconfigured,
+ // then it doesn't need to be reconfigured either since nothing
+ // changes for its config clauses. Otherwise, the
+ // build_package::adjust_reconfigure flag will be added normally
+ // by collect_dependents().
+ //
+ collect_existing_dependent (o,
+ ed,
+ move (ed.dependencies),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ build_package* b (entered_build (d.first));
+ assert (b != nullptr);
+
+ // Re-evaluate up to the earliest position.
+ //
+ assert (ed.dependency_position.first != 0);
+
+ try
+ {
+ build_package_refs dep_chain;
+ collect_build_prerequisites (o,
+ *b,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &postponed_repo,
+ &postponed_alts,
+ numeric_limits<size_t>::max (),
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts,
+ ed.dependency_position);
+ }
+ catch (const merge_configuration_cycle& e)
+ {
+ l5 ([&]{trace << "re-evaluation of existing dependent "
+ << b->available_name_version_db () << " failed "
+ << "due to merge configuration cycle for "
+ << *pcfg << ", throwing "
+ << "recollect_existing_dependents";});
+
+ throw recollect_existing_dependents {e.depth, {move (ed)}};
+ }
+
+ ed.reevaluated = true;
+ }
+ }
+ }
+ }
+
+ // Negotiate the configuration.
+ //
+ // The overall plan is as follows: continue refining the configuration
+ // until there are no more changes by giving each dependent a chance to
+ // make further adjustments.
+ //
+ l5 ([&]{trace << "cfg-negotiate begin " << *pcfg;});
+
+ // For the cluster's dependencies, the skeleton should not be present
+ // since we haven't yet started recursively collecting them. And we
+ // couldn't have started collecting them before we negotiated their
+ // configurations (that's in contrast to the up-negotiation). Let's
+ // assert for that here to make sure that's also true for dependencies
+ // of the postponed existing dependents of this cluster.
+ //
+#ifndef NDEBUG
+ for (const package_key& p: pcfg->dependencies)
+ {
+ build_package* b (entered_build (p));
+ assert (b != nullptr && !b->skeleton && !b->recursive_collection);
+ }
+#endif
+
+ for (auto b (pcfg->dependents.begin ()),
+ i (b),
+ e (pcfg->dependents.end ()); i != e; )
+ {
+ if (postponed_existing_dependents.find (i->first) !=
+ postponed_existing_dependents.end ())
+ {
+ l5 ([&]{trace << "skip dep-postponed existing dependent "
+ << i->first;});
+
+ ++i;
+ continue;
+ }
+
+ // Resolve package skeletons for the dependent and its dependencies.
+ //
+ // For the dependent, the skeleton should be already there (since we
+ // should have started recursively collecting it). For a dependency,
+ // it should not already be there (since we haven't yet started
+ // recursively collecting it). But we could be re-resolving the same
+ // dependency multiple times.
+ //
+ package_skeleton* dept;
+ {
+ build_package* b (entered_build (i->first));
+ assert (b != nullptr && b->skeleton);
+ dept = &*b->skeleton;
+ }
+
+ // If a dependency has already been recursively collected, then we can
+ // no longer call reload_defaults() or verify_sensible() on its
+ // skeleton. Thus, we make a temporary copy and reset that (see the
+ // collect() lambda in collect_build_prerequisites() for more
+ // details).
+ //
+ pair<size_t, size_t> pos;
+ small_vector<reference_wrapper<package_skeleton>, 1> depcs;
+ bool has_alt;
+ {
+ // A non-negotiated cluster must only have one depends position for
+ // each dependent.
+ //
+ assert (i->second.dependencies.size () == 1);
+
+ const postponed_configuration::dependency& ds (
+ i->second.dependencies.front ());
+
+ pos = ds.position;
+
+ // Note that an existing dependent which initially doesn't have the
+ // has_alternative flag present should obtain it as a part of
+ // re-evaluation at this time.
+ //
+ assert (ds.has_alternative);
+
+ has_alt = *ds.has_alternative;
+
+ depcs.reserve (ds.size ());
+ for (const package_key& pk: ds)
+ {
+ build_package* b (entered_build (pk));
+
+ // Shouldn't be here otherwise.
+ //
+ assert (b != nullptr && !b->recursive_collection);
+
+ package_skeleton* depc (
+ &(b->skeleton
+ ? *b->skeleton
+ : b->init_skeleton (o,
+ false /* load_old_dependent_config */)));
+
+ depcs.push_back (*depc);
+ }
+ }
+
+ optional<bool> changed (
+ negotiate_configuration (
+ pcfg->dependency_configurations, *dept, pos, depcs, has_alt));
+
+ // If the dependency alternative configuration cannot be negotiated
+ // for this dependent, then add an entry to unacceptable_alts and
+ // throw unaccept_alternative to recollect from scratch.
+ //
+ if (!changed)
+ {
+ assert (dept->available != nullptr); // Can't be system.
+
+ const package_key& p (dept->package);
+ const version& v (dept->available->version);
+
+ unacceptable_alts.emplace (p, v, pos);
+
+ l5 ([&]{trace << "unable to cfg-negotiate dependency alternative "
+ << pos.first << ',' << pos.second << " for "
+ << "dependent " << package_string (p.name, v)
+ << p.db << ", throwing unaccept_alternative";});
+
+ throw unaccept_alternative ();
+ }
+ else if (*changed)
+ {
+ if (i != b)
+ {
+ i = b; // Restart from the beginning.
+ continue;
+ }
+ }
+
+ ++i;
+ }
+
+ // Being negotiated (so can only be up-negotiated).
+ //
+ pcfg->negotiated = false;
+
+ // Note that we can be adding new packages to the being negotiated
+ // cluster by calling collect_build_prerequisites() for its dependencies
+ // and dependents. Thus, we need to stash the current list of
+ // dependencies and dependents and iterate over them.
+ //
+ // Note that whoever is adding new packages is expected to process them
+ // (they may also process existing packages, which we are prepared to
+ // ignore).
+ //
+ packages dependencies (pcfg->dependencies);
+
+ packages dependents;
+ dependents.reserve (pcfg->dependents.size ());
+
+ for (const auto& p: pcfg->dependents)
+ dependents.push_back (p.first);
+
+ // Process dependencies recursively with this config.
+ //
+ // Note that there could be inter-dependencies between these packages,
+ // which means the configuration can only be up-negotiated.
+ //
+ l5 ([&]{trace << "recursively collect cfg-negotiated dependencies";});
+
+ for (const package_key& p: dependencies)
+ {
+ build_package* b (entered_build (p));
+ assert (b != nullptr);
+
+ // Skip the dependencies which are already collected recursively.
+ //
+ if (!b->recursive_collection)
+ {
+ // Note that due to the existing dependents postponement some of the
+ // dependencies may have no dependent configuration applied to them
+ // at this time. In this case such dependencies may have no skeleton
+ // yet and thus we initialize it. Note that we will still apply the
+ // empty configuration to such dependencies and collect them
+ // recursively, since the negotiation machinery relies on the fact
+ // that the dependencies of a negotiated cluster are (being)
+ // recursively collected. When the time comes and such a dependency
+ // is collected via its (currently postponed) existing dependent,
+ // then its configuration will be up-negotiated (likely involving
+ // throwing the retry_configuration exception).
+ //
+ if (!b->skeleton)
+ b->init_skeleton (o, false /* load_old_dependent_config */);
+
+ package_skeleton& ps (*b->skeleton);
+
+ // Verify and set the dependent configuration for this dependency.
+ //
+ // Note: see similar code for the up-negotiation case.
+ //
+ {
+ const package_configuration& pc (
+ pcfg->dependency_configurations[p]);
+
+ // Skip the verification if this is a system package without
+ // skeleton info.
+ //
+ pair<bool, string> pr (ps.available != nullptr
+ ? ps.verify_sensible (pc)
+ : make_pair (true, string ()));
+
+ if (!pr.first)
+ {
+ // Note that the diagnostics from the dependency will most
+ // likely be in the "error ..." form (potentially with
+ // additional info lines) and by printing it with a two-space
+ // indentation we make it "fit" into our diag record.
+ //
+ diag_record dr (fail);
+ dr << "unable to negotiate sensible configuration for "
+ << "dependency " << p << '\n'
+ << " " << pr.second;
+
+ dr << info << "negotiated configuration:\n";
+ pc.print (dr, " "); // Note 4 spaces since in nested info.
+ }
+
+ ps.dependent_config (pc);
+ }
+
+ build_package_refs dep_chain;
+ collect_build_prerequisites (o,
+ *b,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &postponed_repo,
+ &postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+ else
+ l5 ([&]{trace << "dependency " << b->available_name_version_db ()
+ << " is already (being) recursively collected, "
+ << "skipping";});
+
+ // Unless the dependency collection has been postponed or it is
+ // already being reconfigured, reconfigure it if its configuration
+ // changes.
+ //
+ if (!b->recursive_collection_postponed () && !b->reconfigure ())
+ {
+ const shared_ptr<selected_package>& sp (b->selected);
+
+ assert (b->skeleton); // Should have been init'ed above.
+
+ package_skeleton& ps (*b->skeleton);
+
+ if (sp != nullptr &&
+ sp->state == package_state::configured &&
+ sp->config_checksum != ps.config_checksum ())
+ {
+ b->flags |= build_package::adjust_reconfigure;
+ }
+ }
+ }
+
+ // Continue processing dependents with this config.
+ //
+ l5 ([&]{trace << "recursively collect cfg-negotiated dependents";});
+
+ for (const auto& p: dependents)
+ {
+ if (postponed_existing_dependents.find (p) !=
+ postponed_existing_dependents.end ())
+ {
+ l5 ([&]{trace << "skip dep-postponed existing dependent " << p;});
+ continue;
+ }
+
+ // Select the dependency alternative for which configuration has been
+ // negotiated and collect this dependent starting from the next
+ // depends value.
+ //
+ build_package* b (entered_build (p));
+
+ // We should have already started recursively collecting the dependent
+ // and it should have been postponed.
+ //
+ assert (b != nullptr &&
+ b->available != nullptr &&
+ b->dependencies &&
+ b->skeleton &&
+ b->postponed_dependency_alternatives);
+
+ // Select the dependency alternative (evaluate reflect if present,
+ // etc) and position to the next depends value (see
+ // collect_build_prerequisites() for details).
+ //
+ {
+ const bpkg::dependencies& deps (b->available->dependencies);
+ bpkg::dependencies& sdeps (*b->dependencies);
+ vector<size_t>& salts (*b->alternatives);
+
+ size_t di (sdeps.size ());
+
+ // Skip the dependent if it has been already collected as some
+ // package's dependency or some such.
+ //
+ if (di == deps.size ())
+ {
+ l5 ([&]{trace << "dependent " << b->available_name_version_db ()
+ << " is already recursively collected, skipping";});
+
+ continue;
+ }
+
+ l5 ([&]{trace << "select cfg-negotiated dependency alternative "
+ << "for dependent "
+ << b->available_name_version_db ();});
+
+ // Find the postponed dependency alternative.
+ //
+ auto i (pcfg->dependents.find (p));
+
+ assert (i != pcfg->dependents.end () &&
+ i->second.dependencies.size () == 1);
+
+ pair<size_t, size_t> dp (i->second.dependencies[0].position);
+ assert (dp.first == sdeps.size () + 1);
+
+ build_package::dependency_alternatives_refs pdas (
+ move (*b->postponed_dependency_alternatives));
+
+ b->postponed_dependency_alternatives = nullopt;
+
+ auto j (find_if (pdas.begin (), pdas.end (),
+ [&dp] (const auto& da)
+ {
+ return da.second + 1 == dp.second;
+ }));
+
+ assert (j != pdas.end ());
+
+ const dependency_alternative& da (j->first);
+ size_t dai (j->second);
+
+ // Select the dependency alternative and position to the next
+ // depends value.
+ //
+ const dependency_alternatives_ex& das (deps[di]);
+ dependency_alternatives_ex sdas (das.buildtime, das.comment);
+
+ sdas.emplace_back (nullopt /* enable */,
+ nullopt /* reflect */,
+ da.prefer,
+ da.accept,
+ da.require,
+ da /* dependencies */);
+
+ sdeps.push_back (move (sdas));
+ salts.push_back (dai);
+
+ // Evaluate reflect, if present.
+ //
+ if (da.reflect)
+ b->skeleton->evaluate_reflect (*da.reflect, make_pair (di, dai));
+ }
+
+ // Continue recursively collecting the dependent.
+ //
+ build_package_refs dep_chain;
+
+ collect_build_prerequisites (o,
+ *b,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &postponed_repo,
+ &postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+
+ // Negotiated (so can only be rolled back).
+ //
+ pcfg->negotiated = true;
+
+ l5 ([&]{trace << "cfg-negotiate end " << *pcfg;});
+
+ // Fall through (to start another iteration of the below loop).
+ }
+
+ // Try collecting postponed packages for as long as we are making
+ // progress.
+ //
+ vector<build_package*> spas; // Reuse.
+
+ for (bool prog (find_if (postponed_recs.begin (), postponed_recs.end (),
+ [] (const build_package* p)
+ {
+ // Note that we check for the dependencies
+ // presence rather than for the
+ // recursive_collection flag (see below for
+ // details).
+ //
+ return !p->dependencies;
+ }) != postponed_recs.end () ||
+ !postponed_repo.empty () ||
+ !postponed_cfgs.negotiated () ||
+ !postponed_alts.empty () ||
+ postponed_deps.has_bogus ());
+ prog; )
+ {
+ // First, recursively recollect the not yet collected packages (deviated
+ // existing dependents, etc).
+ //
+ prog = false;
+
+ postponed_packages pcs;
+ for (build_package* p: postponed_recs)
+ {
+ // Note that we check for the dependencies presence rather than for
+ // the recursive_collection flag to also recollect the existing
+ // dependents which, for example, may have been specified on the
+ // command line and whose recursive collection has been pruned since
+ // there were no reason to collect it (configured, no upgrade,
+ // etc). Also note that this time we expect the collection to be
+ // enforced with the build_recollect flag.
+ //
+ assert ((p->flags & build_package::build_recollect) != 0);
+
+ if (!p->dependencies)
+ {
+ package_key pk (p->db, p->name ());
+
+ auto pi (postponed_deps.find (pk));
+ if (pi != postponed_deps.end ())
+ {
+ l5 ([&]{trace << "skip re-collection of dep-postponed package "
+ << pk;});
+
+ // Note that here we would re-collect the package without
+ // specifying any configuration for it.
+ //
+ pi->second.wout_config = true;
+
+ continue;
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ l5 ([&]{trace << "skip re-collection of dep-postponed package "
+ << pk << " since already in cluster " << *pcfg;});
+
+ continue;
+ }
+ }
+
+ build_package_refs dep_chain;
+ collect_build_prerequisites (o,
+ *p,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &postponed_repo,
+ &postponed_alts,
+ 0 /* max_alt_index */,
+ pcs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+
+ // Note that the existing dependent collection can be postponed
+ // due to its own existing dependents.
+ //
+ if (p->recursive_collection)
+ {
+ // Must be present since the re-collection is enforced.
+ //
+ assert (p->dependencies);
+
+ prog = true;
+ }
+ }
+ }
+
+ // Scheduling new packages for re-collection is also a progress.
+ //
+ if (!prog)
+ prog = !pcs.empty ();
+
+ if (prog)
+ {
+ postponed_recs.insert (pcs.begin (), pcs.end ());
+ continue;
+ }
+
+ postponed_packages prs;
+ postponed_packages pas;
+
+ // Now, as there is no more progress made in recollecting of the not yet
+ // collected packages, try to collect the repository-related
+ // postponements.
+ //
+ for (build_package* p: postponed_repo)
+ {
+ l5 ([&]{trace << "collect rep-postponed "
+ << p->available_name_version_db ();});
+
+ build_package_refs dep_chain;
+
+ collect_build_prerequisites (o,
+ *p,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &prs,
+ &pas,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+
+ // Save the potential new dependency alternative-related postponements.
+ //
+ postponed_alts.insert (pas.begin (), pas.end ());
+
+ prog = (prs != postponed_repo);
+
+ if (prog)
+ {
+ postponed_repo.swap (prs);
+ continue;
+ }
+
+ // Now, as there is no more progress made in collecting repository-
+ // related postponements, collect the dependency configuration-related
+ // postponements.
+ //
+ // Note that we do it before alternatives since configurations we do
+ // perfectly (via backtracking) while alternatives -- heuristically.
+ //
+ // Note that since the potential snapshot restore replaces all the list
+ // entries we cannot iterate using the iterator here. Also note that the
+ // list size may change during iterating.
+ //
+ for (size_t ci (0); ci != postponed_cfgs.size (); ++ci)
+ {
+ postponed_configuration* pc (&postponed_cfgs[ci]);
+
+ // Find the next configuration to try to negotiate, skipping the
+ // already negotiated ones.
+ //
+ if (pc->negotiated)
+ continue;
+
+ size_t pcd (depth + 1);
+ pc->depth = pcd;
+
+ // Either return or retry the same cluster or skip this cluster and
+ // proceed to the next one.
+ //
+ for (;;)
+ {
+ // First assume we can negotiate this configuration rolling back if
+ // this doesn't pan out.
+ //
+ snapshot s (*this,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ replaced_vers,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ try
+ {
+ collect_build_postponed (o,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ postponed_cfgs_history,
+ unacceptable_alts,
+ unsatisfied_depts,
+ fdb,
+ rpt_depts,
+ apc,
+ pc);
+
+ // If collect() returns (instead of throwing), this means it
+ // processed everything that was postponed.
+ //
+ assert (postponed_repo.empty () &&
+ postponed_cfgs.negotiated () &&
+ postponed_alts.empty () &&
+ !postponed_deps.has_bogus ());
+
+ l5 ([&]{trace << "end" << trace_suffix;});
+
+ return;
+ }
+ catch (const retry_configuration& e)
+ {
+ // If this is not "our problem", then keep looking.
+ //
+ if (e.depth != pcd)
+ throw;
+
+ package_configurations cfgs (
+ move (pc->dependency_configurations));
+
+ // Restore the state from snapshot.
+ //
+ // Note: postponed_cfgs is re-assigned.
+ //
+ s.restore (*this,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ replaced_vers,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ pc = &postponed_cfgs[ci];
+
+ l5 ([&]{trace << "cfg-negotiation of " << *pc << " failed due "
+ << "to dependent " << e.dependent << ", refining "
+ << "configuration";});
+
+ // Copy over the configuration for further refinement.
+ //
+ // Note that there is also a possibility of ending up with "bogus"
+ // configuration variables that were set by a dependent during
+ // up-negotiation but, due to changes to the overall
+ // configuration, such a dependent was never re-visited.
+ //
+ // The way we are going to deal with this is by detecting such
+ // bogus variables based on the confirmed flag, cleaning them out,
+ // and doing another retry. Here we clear the confirmed flag and
+ // the detection happens in collect_build_postponed() after we
+ // have processed everything postponed (since that's the only time
+ // we can be certain there could no longer be a re-visit).
+ //
+ for (package_configuration& cfg: cfgs)
+ for (config_variable_value& v: cfg)
+ if (v.dependent)
+ v.confirmed = false;
+
+ pc->dependency_configurations = move (cfgs);
+ }
+ catch (merge_configuration& e)
+ {
+ // If this is not "our problem", then keep looking.
+ //
+ if (e.depth != pcd)
+ throw;
+
+ postponed_configuration shadow (move (*pc));
+
+ // Restore the state from snapshot.
+ //
+ // Note: postponed_cfgs is re-assigned.
+ //
+ s.restore (*this,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ replaced_vers,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ pc = &postponed_cfgs[ci];
+
+ assert (!pc->negotiated);
+
+ // Drop any accumulated configuration (which could be carried
+ // over from retry_configuration logic).
+ //
+ pc->dependency_configurations.clear ();
+
+ l5 ([&]{trace << "cfg-negotiation of " << *pc << " failed due "
+ << "to non-negotiated clusters, force-merging "
+ << "based on shadow cluster " << shadow;});
+
+ // Pre-merge into this cluster those non-negotiated clusters which
+ // were merged into the shadow cluster.
+ //
+ for (size_t id: shadow.merged_ids)
+ {
+ postponed_configuration* c (postponed_cfgs.find (id));
+
+ if (c != nullptr)
+ {
+ // Otherwise we would be handling the exception in the higher
+ // stack frame.
+ //
+ assert (!c->negotiated);
+
+ l5 ([&]{trace << "force-merge " << *c << " into " << *pc;});
+
+ pc->merge (move (*c));
+
+ // Mark configuration as the one being merged from for
+ // subsequent erasing from the list.
+ //
+ c->dependencies.clear ();
+ }
+ }
+
+ // Erase clusters which we have merged from. Also re-translate the
+ // current cluster address into index which may change as a result
+ // of the merge.
+ //
+ auto i (postponed_cfgs.begin ());
+ auto j (postponed_cfgs.before_begin ()); // Precedes iterator i.
+
+ for (size_t k (0); i != postponed_cfgs.end (); )
+ {
+ if (!i->dependencies.empty ())
+ {
+ if (&*i == pc)
+ ci = k;
+
+ ++i;
+ ++j;
+ ++k;
+ }
+ else
+ i = postponed_cfgs.erase_after (j);
+ }
+
+ pc->set_shadow_cluster (move (shadow));
+ }
+ catch (const recollect_existing_dependents& e)
+ {
+ // If this is not "our problem", then keep looking.
+ //
+ if (e.depth != pcd)
+ throw;
+
+ // Restore the state from snapshot.
+ //
+ // Note: postponed_cfgs is re-assigned.
+ //
+ s.restore (*this,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ replaced_vers,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ pc = &postponed_cfgs[ci];
+
+ assert (!pc->negotiated);
+
+ // Drop any accumulated configuration (which could be carried
+ // over from retry_configuration logic).
+ //
+ pc->dependency_configurations.clear ();
+
+ // The shadow cluster likely contains the problematic
+ // dependent/dependencies. Thus, it feels right to drop the shadow
+ // before re-negotiating the cluster.
+ //
+ pc->shadow_cluster.clear ();
+
+ l5 ([&]{trace << "cfg-negotiation of " << *pc << " failed due to "
+ << "some existing dependents related problem, "
+ << "scheduling their re-collection";});
+
+ for (const existing_dependent& ed: e.dependents)
+ {
+ l5 ([&]{trace << "schedule re-collection of "
+ << (!ed.dependency ? "deviated " : "")
+ << "existing dependent " << *ed.selected
+ << ed.db;});
+
+ // Note that we pass false as the add_required_by argument since
+ // the package builds collection state has been restored and the
+ // originating dependency for this existing dependent may not be
+ // collected anymore.
+ //
+ recollect_existing_dependent (o,
+ ed,
+ replaced_vers,
+ postponed_recs,
+ postponed_cfgs,
+ unsatisfied_depts,
+ false /* add_required_by */);
+ }
+ }
+ }
+ }
+
+ // Note that we only get here if we didn't make any progress on the
+ // previous loop (the only "progress" path ends with return).
+
+ // Now, try to collect the dependency alternative-related
+ // postponements.
+ //
+ if (!postponed_alts.empty ())
+ {
+ // Sort the postponements in the unprocessed dependencies count
+ // descending order.
+ //
+ // The idea here is to preferably handle those postponed packages
+ // first, which have a higher probability to affect the dependency
+ // alternative selection for other packages.
+ //
+ spas.assign (postponed_alts.begin (), postponed_alts.end ());
+
+ std::sort (spas.begin (), spas.end (),
+ [] (build_package* x, build_package* y)
+ {
+ size_t xt (x->available->dependencies.size () -
+ x->dependencies->size ());
+
+ size_t yt (y->available->dependencies.size () -
+ y->dependencies->size ());
+
+ if (xt != yt)
+ return xt > yt ? -1 : 1;
+
+ // Also factor the package name and configuration path
+ // into the ordering to achieve a stable result.
+ //
+ int r (x->name ().compare (y->name ()));
+ return r != 0
+ ? r
+ : x->db.get ().config.compare (y->db.get ().config);
+ });
+
+ // Calculate the maximum number of the enabled dependency
+ // alternatives.
+ //
+ size_t max_enabled_count (0);
+
+ for (build_package* p: spas)
+ {
+ assert (p->postponed_dependency_alternatives);
+
+ size_t n (p->postponed_dependency_alternatives->size ());
+
+ if (max_enabled_count < n)
+ max_enabled_count = n;
+ }
+
+ assert (max_enabled_count != 0); // Wouldn't be here otherwise.
+
+ // Try to select a dependency alternative with the lowest index,
+ // preferring postponed packages with the longer tail of unprocessed
+ // dependencies (see above for the reasoning).
+ //
+ for (size_t i (1); i <= max_enabled_count && !prog; ++i)
+ {
+ for (build_package* p: spas)
+ {
+ prs.clear ();
+ pas.clear ();
+
+ size_t ndep (p->dependencies->size ());
+
+ build_package_refs dep_chain;
+
+ l5 ([&]{trace << "index " << i << " collect alt-postponed "
+ << p->available_name_version_db ();});
+
+ collect_build_prerequisites (o,
+ *p,
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ &prs,
+ &pas,
+ i,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+
+ prog = (pas.find (p) == pas.end () ||
+ ndep != p->dependencies->size ());
+
+ // Save the potential new postponements.
+ //
+ if (prog)
+ {
+ postponed_alts.erase (p);
+ postponed_alts.insert (pas.begin (), pas.end ());
+ }
+
+ size_t npr (postponed_repo.size ());
+ postponed_repo.insert (prs.begin (), prs.end ());
+
+ // Note that not collecting any alternative-related postponements
+ // but producing new repository-related postponements is progress
+ // nevertheless.
+ //
+ // Note that we don't need to check for new configuration-related
+ // postponements here since if they are present, then this package
+ // wouldn't be in pas and so prog would be true (see above for
+ // details).
+ //
+ if (!prog)
+ prog = (npr != postponed_repo.size ());
+
+ if (prog)
+ break;
+ }
+ }
+
+ if (prog)
+ continue;
+ }
+
+ assert (!prog);
+
+ // Note that a bogus dependency postponement may, in particular, happen
+ // to an existing dependent due to the cycle introduced by its own
+ // existing dependent. For example, an existing dependent (libfoo)
+ // re-evaluation can be postponed since it starts a chain of
+ // re-evaluations which ends up with its own existing dependent (foo)
+ // with config clause, which being collected after re-evaluation is
+ // unable to collect the prematurely collected libfoo. In this case
+ // postponing collection of libfoo will also prevent foo from being
+ // re-evaluated, the postponement will turn out to be bogus, and we may
+ // start yo-yoing (see the
+ // pkg-build/.../recollect-dependent-bogus-dependency-postponement test
+ // for the real example). To prevent that, let's try to collect a
+ // postponed bogus dependency by recollecting its existing dependents,
+ // if present, prior to considering it as really bogus and re-collecting
+ // everything from scratch.
+ //
+ for (const auto& pd: postponed_deps)
+ {
+ if (pd.second.bogus ())
+ {
+ const package_key& pk (pd.first);
+
+ for (existing_dependent& ed:
+ query_existing_dependents (trace,
+ o,
+ pk.db,
+ pk.name,
+ false /* exclude_optional */,
+ fdb,
+ rpt_depts,
+ replaced_vers))
+ {
+ l5 ([&]{trace << "schedule re-collection of "
+ << (!ed.dependency ? "deviated " : "")
+ << "existing dependent " << *ed.selected
+ << ed.db << " due to bogus postponement of "
+ << "dependency " << pk;});
+
+ recollect_existing_dependent (o,
+ ed,
+ replaced_vers,
+ postponed_recs,
+ postponed_cfgs,
+ unsatisfied_depts,
+ true /* add_required_by */);
+ prog = true;
+ break;
+ }
+ }
+ }
+
+ if (prog)
+ continue;
+
+ // Finally, erase the bogus postponements and re-collect from scratch,
+ // if any (see postponed_dependencies for details).
+ //
+ // Note that we used to re-collect such postponements in-place but
+ // re-doing from scratch feels more correct (i.e., we may end up doing
+ // it earlier which will affect dependency alternatives).
+ //
+ postponed_deps.cancel_bogus (trace);
+ }
+
+ // Check if any negotiated configurations ended up with any bogus
+ // variables (see retry_configuration catch block for background).
+ //
+ // Note that we could potentially end up yo-yo'ing: we remove a bogus and
+ // that causes the original dependent to get re-visited which in turn
+ // re-introduces the bogus. In other words, one of the bogus variables
+ // which we have removed are actually the cause of no longer needing the
+ // dependent that introduced it. Feels like the correct outcome of this
+ // should be keeping the bogus variable that triggered yo-yo'ing. Of
+ // course, there could be some that we should keep and some that we should
+ // drop and figuring this out would require retrying all possible
+ // combinations. An alternative solution would be to detect yo-yo'ing,
+ // print the bogus variables involved, and ask the user to choose (with an
+ // override) which ones to keep. Let's go with this for now.
+ //
+ {
+ // On the first pass see if we have anything bogus.
+ //
+ bool bogus (false);
+ for (postponed_configuration& pcfg: postponed_cfgs)
+ {
+ if (pcfg.negotiated && *pcfg.negotiated) // Negotiated.
+ {
+ for (package_configuration& cfg: pcfg.dependency_configurations)
+ {
+ for (config_variable_value& v: cfg)
+ {
+ if (v.dependent && !v.confirmed)
+ {
+ bogus = true;
+ break;
+ }
+ }
+ if (bogus) break;
+ }
+ if (bogus) break;
+ }
+ }
+
+ if (bogus)
+ {
+ // On the second pass calculate the checksum of all the negotiated
+ // clusters.
+ //
+ sha256 cs;
+ for (postponed_configuration& pcfg: postponed_cfgs)
+ {
+ if (pcfg.negotiated && *pcfg.negotiated)
+ {
+ for (package_configuration& cfg: pcfg.dependency_configurations)
+ {
+ for (config_variable_value& v: cfg)
+ {
+ if (v.dependent)
+ to_checksum (cs, v);
+ }
+ }
+ }
+ }
+
+ bool cycle;
+ {
+ string s (cs.string ());
+ if (find (postponed_cfgs_history.begin (),
+ postponed_cfgs_history.end (),
+ s) == postponed_cfgs_history.end ())
+ {
+ postponed_cfgs_history.push_back (move (s));
+ cycle = false;
+ }
+ else
+ cycle = true;
+ }
+
+ // On the third pass we either retry or diagnose.
+ //
+ diag_record dr;
+ if (cycle)
+ {
+ dr <<
+ fail << "unable to remove bogus configuration values without "
+ << "causing configuration refinement cycle" <<
+ info << "consider manually specifying one or more of the "
+ << "following variables as user configuration";
+ }
+
+ for (postponed_configuration& pcfg: postponed_cfgs)
+ {
+ optional<package_key> dept; // Bogus dependent.
+
+ if (pcfg.negotiated && *pcfg.negotiated)
+ {
+ for (package_configuration& cfg: pcfg.dependency_configurations)
+ {
+ // Note that the entire dependency configuration may end up
+ // being "bogus" (i.e., it does not contain any configuration
+ // variables with a confirmed dependent). But that will be
+ // handled naturally: we will either no longer have this
+ // dependency in the cluster and thus never call its skeleton's
+ // dependent_config() or this call will be no-op since it won't
+ // find any dependent variables.
+ //
+ for (config_variable_value& v: cfg)
+ {
+ if (v.dependent && !v.confirmed)
+ {
+ if (!dept)
+ dept = move (v.dependent);
+
+ if (cycle)
+ dr << "\n " << v.serialize_cmdline ();
+ else
+ v.undefine ();
+ }
+ }
+ }
+
+ if (dept)
+ {
+ if (cycle)
+ break;
+ else
+ throw retry_configuration {pcfg.depth, move (*dept)};
+ }
+ }
+
+ if (dept)
+ break;
+ }
+ }
+ }
+
+ // If any postponed_{repo,alts} builds remained, then perform the
+ // diagnostics run. Naturally we shouldn't have any postponed_cfgs without
+ // one of the former.
+ //
+ if (!postponed_repo.empty ())
+ {
+ build_package_refs dep_chain;
+
+ collect_build_prerequisites (o,
+ **postponed_repo.begin (),
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ nullptr,
+ nullptr,
+ 0,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+
+ assert (false); // Can't be here.
+ }
+
+ if (!postponed_alts.empty ())
+ {
+ build_package_refs dep_chain;
+
+ collect_build_prerequisites (o,
+ **postponed_alts.begin (),
+ dep_chain,
+ fdb,
+ apc,
+ rpt_depts,
+ replaced_vers,
+ nullptr,
+ nullptr,
+ 0,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+
+ assert (false); // Can't be here.
+ }
+
+ // While the assumption is that we shouldn't leave any non-negotiated
+ // clusters, let's verify that for good measure. Let's thus trace the
+ // non-negotiated clusters before the assertion.
+ //
+#ifndef NDEBUG
+ for (const postponed_configuration& cfg: postponed_cfgs)
+ {
+ if (!cfg.negotiated || !*cfg.negotiated)
+ trace << "unexpected non-negotiated cluster " << cfg;
+ }
+
+ assert (postponed_cfgs.negotiated ());
+#endif
+
+ l5 ([&]{trace << "end" << trace_suffix;});
+ }
+
+  // Order the package, delegating to the recursive overload with a fresh
+  // (empty) package chain used for dependency cycle detection.
+  //
+  build_packages::iterator build_packages::
+  order (database& db,
+         const package_name& name,
+         const function<find_database_function>& fdb,
+         bool reorder)
+  {
+    package_refs chain;
+    return order (db, name, chain, fdb, reorder);
+  }
+
+  // Collect, recursively, the dependents of all the configured packages in
+  // the map which are being up/down-graded or reconfigured, entering
+  // reconfigure adjustments for them as necessary. Return the keys of the
+  // dependents for which a new map entry was added or an existing one was
+  // modified (see the recursive overload for details).
+  //
+  set<package_key> build_packages::
+  collect_dependents (const repointed_dependents& rpt_depts,
+                      unsatisfied_dependents& unsatisfied_depts)
+  {
+    set<package_key> r;
+
+    // First, cache the packages in the map since we will be adding new
+    // entries to the map while collecting dependents of the initial package
+    // set, recursively.
+    //
+    // Note: the pointer is stable (points to a value in std::map).
+    //
+    vector<build_package*> deps;
+
+    for (auto& p: map_)
+    {
+      build_package& d (p.second.package);
+
+      // Prune if this is not a configured package being up/down-graded
+      // or reconfigured.
+      //
+      if (d.action && *d.action != build_package::drop && d.reconfigure ())
+        deps.push_back (&d);
+    }
+
+    // Note: the pointer is stable (see above for details).
+    //
+    set<const build_package*> visited_deps;
+
+    for (build_package* p: deps)
+      collect_dependents (*p, rpt_depts, unsatisfied_depts, visited_deps, r);
+
+    return r;
+  }
+
+  // Collect, recursively, the dependents of the (being reconfigured or
+  // up/down-graded) package p, entering reconfigure adjustments for them
+  // into the map as necessary and accumulating in r the keys of the
+  // dependents which were added to or modified in the map.
+  //
+  // If an up/downgrade of p no longer satisfies an existing dependent's
+  // version constraint, postpone the failure (see unsatisfied_dependents
+  // for details) rather than failing immediately.
+  //
+  void build_packages::
+  collect_dependents (build_package& p,
+                      const repointed_dependents& rpt_depts,
+                      unsatisfied_dependents& unsatisfied_depts,
+                      set<const build_package*>& visited_deps,
+                      set<package_key>& r)
+  {
+    tracer trace ("collect_dependents");
+
+    // Bail out if the dependency has already been visited and add it to the
+    // visited set otherwise.
+    //
+    if (!visited_deps.insert (&p).second)
+      return;
+
+    database& pdb (p.db);
+    const shared_ptr<selected_package>& sp (p.selected);
+
+    const package_name& n (sp->name);
+
+    // See if we are up/downgrading this package. In particular, the available
+    // package could be NULL meaning we are just adjusting.
+    //
+    int ud (p.available != nullptr
+            ? sp->version.compare (p.available_version ())
+            : 0);
+
+    for (database& ddb: pdb.dependent_configs ())
+    {
+      for (auto& pd: query_dependents_cache (ddb, n, pdb))
+      {
+        package_name& dn (pd.name);
+        optional<version_constraint>& dc (pd.constraint);
+
+        auto i (map_.find (ddb, dn));
+
+        // Make sure the up/downgraded package still satisfies this
+        // dependent. But first "prune" if the dependent is being dropped or
+        // this is a replaced prerequisite of the repointed dependent.
+        //
+        // Note that the repointed dependents are always collected (see
+        // collect_build_prerequisites() for details).
+        //
+        bool check (ud != 0 && dc);
+
+        if (i != map_.end ())
+        {
+          build_package& dp (i->second.package);
+
+          // Skip the dropped dependent.
+          //
+          if (dp.action && *dp.action == build_package::drop)
+            continue;
+
+          repointed_dependents::const_iterator j (
+            rpt_depts.find (package_key {ddb, dn}));
+
+          if (j != rpt_depts.end ())
+          {
+            const map<package_key, bool>& prereqs_flags (j->second);
+
+            auto k (prereqs_flags.find (package_key {pdb, n}));
+
+            // A false flag means a replaced prerequisite of the repointed
+            // dependent.
+            //
+            if (k != prereqs_flags.end () && !k->second)
+              continue;
+          }
+
+          // There is one tricky aspect: the dependent could be in the process
+          // of being reconfigured or up/downgraded as well. In this case all
+          // we need to do is detect this situation and skip the test since
+          // all the (new) constraints of this package have been satisfied in
+          // collect_build().
+          //
+          if (check)
+            check = !dp.dependencies;
+        }
+
+        if (check)
+        {
+          const version& av (p.available_version ());
+          const version_constraint& c (*dc);
+
+          // If the new dependency version doesn't satisfy the existing
+          // dependent, then postpone the failure in the hope that this
+          // problem will be resolved naturally (the dependent will also be
+          // up/downgraded, etc; see unsatisfied_dependents for details).
+          //
+          if (!satisfies (av, c))
+          {
+            package_key d (ddb, dn);
+
+            l5 ([&]{trace << "postpone failure for existing dependent " << d
+                          << " unsatisfied with dependency "
+                          << p.available_name_version_db () << " ("
+                          << c << ')';});
+
+            unsatisfied_depts.add (d, package_key (p.db, p.name ()), c);
+          }
+        }
+
+        // Create the reconfigure adjustment entry for this dependent,
+        // loading its selected package from the dependent's configuration.
+        //
+        auto adjustment = [&dn, &ddb, &n, &pdb] () -> build_package
+        {
+          shared_ptr<selected_package> dsp (ddb.load<selected_package> (dn));
+
+          // A system package cannot be a dependent.
+          //
+          assert (!dsp->system ());
+
+          package_version_key pvk (pdb, n, version ());
+
+          return build_package {
+            build_package::adjust,
+            ddb,
+            move (dsp),
+            nullptr,      // No available pkg/repo fragment.
+            nullptr,
+            nullopt,      // Dependencies.
+            nullopt,      // Dependencies alternatives.
+            nullopt,      // Package skeleton.
+            nullopt,      // Postponed dependency alternatives.
+            false,        // Recursive collection.
+            nullopt,      // Hold package.
+            nullopt,      // Hold version.
+            {},           // Constraints.
+            false,        // System.
+            false,        // Keep output directory.
+            false,        // Disfigure (from-scratch reconf).
+            false,        // Configure-only.
+            nullopt,      // Checkout root.
+            false,        // Checkout purge.
+            strings (),   // Configuration variables.
+            nullopt,      // Upgrade.
+            false,        // Deorphan.
+            {move (pvk)}, // Required by (dependency).
+            false,        // Required by dependents.
+            build_package::adjust_reconfigure};
+        };
+
+        // If the existing entry is pre-entered or is an adjustment, then we
+        // merge it into the new adjustment entry. Otherwise (is a build), we
+        // just add the reconfigure adjustment flag to it, unless it is
+        // already being reconfigured. In the latter case we don't add the
+        // dependent to the resulting set since we neither add a new entry to
+        // the map nor modify an existing one.
+        //
+        bool add (true);
+        if (i != map_.end ())
+        {
+          build_package& dp (i->second.package);
+
+          if (!dp.action ||                       // Pre-entered.
+              *dp.action != build_package::build) // Adjustment.
+          {
+            build_package bp (adjustment ());
+            bp.merge (move (dp));
+            dp = move (bp);
+          }
+          else // Build.
+          {
+            if (!dp.reconfigure ())
+              dp.flags |= build_package::adjust_reconfigure;
+            else
+              add = false;
+          }
+        }
+        else
+        {
+          // Don't move dn since it is used by adjustment().
+          //
+          i = map_.emplace (package_key {ddb, dn},
+                            data_type {end (), adjustment ()}).first;
+        }
+
+        if (add)
+          r.insert (i->first);
+
+        build_package& dp (i->second.package);
+
+        // Add this dependent's constraint, if present, to the dependency's
+        // constraints list for completeness, while suppressing duplicates.
+        //
+        if (dc)
+        {
+          using constraint_type = build_package::constraint_type;
+
+          constraint_type c (move (*dc),
+                             ddb,
+                             move (dn),
+                             dp.selected->version,
+                             true /* selected_dependent */);
+
+          if (find_if (p.constraints.begin (), p.constraints.end (),
+                       [&c] (const constraint_type& v)
+                       {
+                         return v.dependent == c.dependent &&
+                                v.value == c.value;
+                       }) == p.constraints.end ())
+          {
+            p.constraints.emplace_back (move (c));
+          }
+        }
+
+        // Recursively collect our own dependents.
+        //
+        // Note that we cannot end up with an infinite recursion for
+        // configured packages due to a dependency cycle since we "prune" for
+        // visited dependencies (also see order() for details).
+        //
+        collect_dependents (dp, rpt_depts, unsatisfied_depts, visited_deps, r);
+      }
+    }
+  }
+
+  // Clear both the ordered package list and the package map.
+  //
+  void build_packages::
+  clear ()
+  {
+    build_package_list::clear ();
+    map_.clear ();
+  }
+
+  // Clear only the ordered package list, resetting each map entry's cached
+  // list position to end() while keeping the collected packages themselves.
+  //
+  void build_packages::
+  clear_order ()
+  {
+    build_package_list::clear ();
+
+    for (auto& p: map_)
+      p.second.position = end ();
+  }
+
+  // Print to the diagnostics record the version constraints imposed on
+  // package p by its dependents, one per line, recursing into the
+  // dependents' own constraints with increased indentation. If
+  // selected_dependent is specified, only print the constraints with the
+  // matching selected_dependent flag. Use the printed set to print an
+  // already-printed package's sub-tree as "..." instead of repeating it.
+  //
+  void build_packages::
+  print_constraints (diag_record& dr,
+                     const build_package& p,
+                     string& indent,
+                     set<package_key>& printed,
+                     optional<bool> selected_dependent) const
+  {
+    using constraint_type = build_package::constraint_type;
+
+    const vector<constraint_type>& cs (p.constraints);
+
+    if (!cs.empty ())
+    {
+      package_key pk (p.db, p.name ());
+
+      if (printed.find (pk) == printed.end ())
+      {
+        printed.insert (pk);
+
+        for (const constraint_type& c: cs)
+        {
+          if (!selected_dependent ||
+              *selected_dependent == c.selected_dependent)
+          {
+            // Recurse only if the dependent is itself collected as a build.
+            //
+            if (const build_package* d = dependent_build (c))
+            {
+              dr << '\n' << indent << c.dependent << " requires (" << pk
+                 << ' ' << c.value << ')';
+
+              indent += "  ";
+              print_constraints (dr, *d, indent, printed, selected_dependent);
+              indent.resize (indent.size () - 2);
+            }
+            else
+              dr << '\n' << indent << c.dependent << " requires (" << pk << ' '
+                 << c.value << ')';
+          }
+        }
+      }
+      else
+      {
+        // Already printed: indicate the elision if anything would have
+        // matched the selected_dependent filter.
+        //
+        for (const constraint_type& c: cs)
+        {
+          if (!selected_dependent ||
+              *selected_dependent == c.selected_dependent)
+          {
+            dr << '\n' << indent << "...";
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  // Print the constraints for the package identified by pk, which must
+  // already be collected (present in the map).
+  //
+  void build_packages::
+  print_constraints (diag_record& dr,
+                     const package_key& pk,
+                     string& indent,
+                     set<package_key>& printed,
+                     optional<bool> selected_dependent) const
+  {
+    const build_package* p (entered_build (pk));
+    assert (p != nullptr); // Expected to be collected.
+    print_constraints (dr, *p, indent, printed, selected_dependent);
+  }
+
+  // Verify that the ordered list and the package map are consistent: each
+  // map entry's cached position matches its actual position in the list,
+  // pre-entered builds are never ordered, and all real build actions
+  // (builds, drops, adjustments) are ordered. Print diagnostics and assert
+  // on any inconsistency.
+  //
+  void build_packages::
+  verify_ordering () const
+  {
+    for (const auto& b: map_)
+    {
+      const build_package& bp (b.second.package);
+
+      auto i (find_if (begin (), end (),
+                       [&bp] (const build_package& p) {return &p == &bp;}));
+
+      // List ordering must properly be reflected in the tree entries.
+      //
+      assert (i == b.second.position);
+
+      // Pre-entered builds must never be ordered and the real build actions
+      // (builds, adjustments, etc) must all be ordered.
+      //
+      // Note that the latter was not the case until we've implemented
+      // re-collection from scratch after the package version replacement (see
+      // replaced_versions for details). Before that the whole dependency
+      // trees from the being replaced dependent stayed in the map.
+      //
+      if (bp.action.has_value () != (i != end ()))
+      {
+        diag_record dr (info);
+
+        if (!bp.action)
+        {
+          dr << "pre-entered builds must never be ordered" <<
+            info << "ordered pre-entered " << b.first;
+        }
+        else
+        {
+          dr << "build actions must be ordered" <<
+            info << "unordered ";
+
+          switch (*bp.action)
+          {
+          case build_package::build:
+            {
+              dr << "build " << bp.available_name_version_db () <<
+                info << "flags 0x" << hex << uppercase << bp.flags;
+              break;
+            }
+          case build_package::drop:
+            {
+              dr << "drop " << *bp.selected << bp.db;
+              break;
+            }
+          case build_package::adjust:
+            {
+              dr << "adjustment " << *bp.selected << bp.db <<
+                info << "flags 0x" << hex << uppercase << bp.flags;
+              break;
+            }
+          }
+        }
+
+        dr << info
+           << "please report in https://github.com/build2/build2/issues/318";
+
+        // Make sure the diagnostics is printed before the assertion fires.
+        //
+        dr.flush ();
+
+        assert (bp.action.has_value () == (i != end ()));
+      }
+    }
+  }
+
+  // Query from all the dependent configurations the existing dependents of
+  // the specified dependency package, skipping those which are repointed,
+  // expected to be built or dropped (per replaced_vers), already being
+  // built or dropped, or whose constraint is not satisfied by the
+  // dependency up/downgrade. Pre-reevaluate each remaining dependent to
+  // calculate the position it should be re-evaluated to and return the
+  // resulting list. Dependents whose re-evaluation deviated are returned
+  // with absent dependency/position members.
+  //
+  vector<build_packages::existing_dependent> build_packages::
+  query_existing_dependents (
+    tracer& trace,
+    const pkg_build_options& o,
+    database& db,
+    const package_name& name,
+    bool exclude_optional,
+    const function<find_database_function>& fdb,
+    const repointed_dependents& rpt_depts,
+    const replaced_versions& replaced_vers)
+  {
+    vector<existing_dependent> r;
+
+    // Lazily search for the dependency build and detect if it is being
+    // up/downgraded. Note that we will only do that if the dependency has an
+    // existing dependent which imposes a version constraint on this
+    // dependency.
+    //
+    const build_package* dep (nullptr);
+    int ud (0);
+
+    for (database& ddb: db.dependent_configs ())
+    {
+      for (auto& pd: query_dependents (ddb, name, db))
+      {
+        package_key pk (ddb, pd.name);
+
+        // Ignore repointed dependents.
+        //
+        if (rpt_depts.find (pk) != rpt_depts.end ())
+        {
+          l5 ([&]{trace << "skip repointed existing dependent " << pk
+                        << " of dependency " << name << db;});
+          continue;
+        }
+
+        // Ignore dependent which is expected to be built or dropped.
+        //
+        auto vi (replaced_vers.find (pk));
+        if (vi != replaced_vers.end () && !vi->second.replaced)
+        {
+          bool build (vi->second.available != nullptr);
+
+          l5 ([&]{trace << "skip expected to be "
+                        << (build ? "built" : "dropped")
+                        << " existing dependent " << pk
+                        << " of dependency " << name << db;});
+
+          continue;
+        }
+
+        // Ignore dependent which is already being built or dropped.
+        //
+        const build_package* p (entered_build (pk));
+
+        if (p != nullptr && p->action)
+        {
+          bool build;
+          if (((build = *p->action == build_package::build) &&
+               (p->system || p->recollect_recursively (rpt_depts))) ||
+              *p->action == build_package::drop)
+          {
+            l5 ([&]{trace << "skip being "
+                          << (build ? "built" : "dropped")
+                          << " existing dependent " << pk
+                          << " of dependency " << name << db;});
+            continue;
+          }
+        }
+
+        // Ignore dependent if this dependency up/downgrade won't satisfy the
+        // dependent's constraint. The thinking here is that we will either
+        // fail for this reason later or the problem will be resolved
+        // naturally due to the execution plan refinement (see
+        // unsatisfied_dependents for details).
+        //
+        if (pd.constraint)
+        {
+          // Search for the dependency build and detect if it is being
+          // up/downgraded, if not done yet. In particular, the available
+          // package could be NULL meaning we are just adjusting.
+          //
+          if (dep == nullptr)
+          {
+            dep = entered_build (db, name);
+
+            assert (dep != nullptr); // Expected to be being built.
+
+            if (dep->available != nullptr)
+            {
+              const shared_ptr<selected_package>& sp (dep->selected);
+
+              // Expected to be selected since it has an existing dependent.
+              //
+              assert (sp != nullptr);
+
+              ud = sp->version.compare (dep->available_version ());
+            }
+          }
+
+          if (ud != 0 &&
+              !satisfies (dep->available_version (), *pd.constraint))
+          {
+            l5 ([&]{trace << "skip unsatisfied existing dependent " << pk
+                          << " of dependency "
+                          << dep->available_name_version_db () << " due to "
+                          << "constraint (" << name << ' ' << *pd.constraint
+                          << ')';});
+
+            continue;
+          }
+        }
+
+        // Pre-reevaluate the dependent to calculate the position which the
+        // dependent should be re-evaluated to.
+        //
+        shared_ptr<selected_package> dsp (
+          ddb.load<selected_package> (pd.name));
+
+        pair<shared_ptr<available_package>,
+             lazy_shared_ptr<repository_fragment>> rp (
+               find_available_fragment (o, ddb, dsp));
+
+        optional<package_key> orig_dep (package_key {db, name});
+
+        try
+        {
+          // A temporary build object used only for the pre-reevaluation
+          // call; note that it is not entered into the map.
+          //
+          build_package p {
+            build_package::build,
+            ddb,
+            dsp,          // Don't move from since will be used later.
+            move (rp.first),
+            move (rp.second),
+            nullopt,      // Dependencies.
+            nullopt,      // Dependencies alternatives.
+            nullopt,      // Package skeleton.
+            nullopt,      // Postponed dependency alternatives.
+            false,        // Recursive collection.
+            nullopt,      // Hold package.
+            nullopt,      // Hold version.
+            {},           // Constraints.
+            false,        // System.
+            false,        // Keep output directory.
+            false,        // Disfigure (from-scratch reconf).
+            false,        // Configure-only.
+            nullopt,      // Checkout root.
+            false,        // Checkout purge.
+            strings (),   // Configuration variables.
+            nullopt,      // Upgrade.
+            false,        // Deorphan.
+            {},           // Required by (dependency).
+            false,        // Required by dependents.
+            0};           // State flags.
+
+          // Scratch state for the pre-reevaluation call which is expected
+          // to leave it untouched (asserted below).
+          //
+          build_package_refs dep_chain;
+          postponed_packages postponed_repo;
+          postponed_packages postponed_alts;
+          postponed_packages postponed_recs;
+          postponed_existing_dependencies postponed_edeps;
+          postponed_dependencies postponed_deps;
+          postponed_configurations postponed_cfgs;
+          unacceptable_alternatives unacceptable_alts;
+          unsatisfied_dependents unsatisfied_depts;
+          replaced_versions replaced_vers;
+
+          optional<pre_reevaluate_result> pr (
+            collect_build_prerequisites (o,
+                                         p,
+                                         dep_chain,
+                                         fdb,
+                                         nullptr /* add_priv_cfg_function */,
+                                         rpt_depts,
+                                         replaced_vers,
+                                         &postponed_repo,
+                                         &postponed_alts,
+                                         numeric_limits<size_t>::max (),
+                                         postponed_recs,
+                                         postponed_edeps,
+                                         postponed_deps,
+                                         postponed_cfgs,
+                                         unacceptable_alts,
+                                         unsatisfied_depts,
+                                         pair<size_t, size_t> (0, 0),
+                                         orig_dep));
+
+          // Must be read-only.
+          //
+          assert (postponed_repo.empty () &&
+                  postponed_alts.empty () &&
+                  postponed_recs.empty () &&
+                  postponed_edeps.empty () &&
+                  postponed_deps.empty () &&
+                  postponed_cfgs.empty () &&
+                  unacceptable_alts.empty () &&
+                  unsatisfied_depts.empty () &&
+                  replaced_vers.empty ());
+
+          if (pr && (!pr->reevaluation_optional || !exclude_optional))
+          {
+            // Try to preserve the name of the originating dependency as the
+            // one which brings the existing dependent to the config cluster.
+            // Failed that, use the first dependency in the alternative which
+            // we will be re-evaluating to.
+            //
+            package_key dep (*orig_dep);
+
+            pre_reevaluate_result::packages& deps (
+              pr->reevaluation_dependencies);
+
+            assert (!deps.empty ());
+
+            if (find (deps.begin (), deps.end (), dep) == deps.end ())
+              dep = move (deps.front ());
+
+            r.push_back (
+              existing_dependent {
+                ddb, move (dsp),
+                move (dep), pr->reevaluation_position,
+                move (*orig_dep), pr->originating_dependency_position});
+          }
+        }
+        catch (const reevaluation_deviated&)
+        {
+          // Return the deviated dependent with absent dependency/position.
+          //
+          r.push_back (
+            existing_dependent {ddb, move (dsp),
+                                nullopt, {},
+                                move (*orig_dep), {}});
+        }
+      }
+    }
+
+    return r;
+  }
+
+  // Collect (non-recursively) the dependency package of the specified
+  // existing dependent previously returned by query_existing_dependents(),
+  // adding the dependent's constraint on this dependency, if present.
+  // Return the collected build package. May not be called for a deviated
+  // dependent (which has no dependency member).
+  //
+  const build_package* build_packages::
+  collect_existing_dependent_dependency (
+    const pkg_build_options& o,
+    const existing_dependent& ed,
+    replaced_versions& replaced_vers,
+    postponed_configurations& postponed_cfgs,
+    unsatisfied_dependents& unsatisfied_depts)
+  {
+    assert (ed.dependency); // Shouldn't be called for deviated dependents.
+
+    const shared_ptr<selected_package>& dsp (ed.selected);
+
+    package_version_key dpt (ed.db, dsp->name, dsp->version);
+    const package_key& dep (*ed.dependency);
+
+    lazy_shared_ptr<selected_package> lsp (dep.db.get (), dep.name);
+    shared_ptr<selected_package> sp (lsp.load ());
+
+    pair<shared_ptr<available_package>,
+         lazy_shared_ptr<repository_fragment>> rp (
+           find_available_fragment (o, dep.db, sp));
+
+    bool system (sp->system ());
+
+    build_package p {
+      build_package::build,
+      dep.db,
+      move (sp),
+      move (rp.first),
+      move (rp.second),
+      nullopt,      // Dependencies.
+      nullopt,      // Dependencies alternatives.
+      nullopt,      // Package skeleton.
+      nullopt,      // Postponed dependency alternatives.
+      false,        // Recursive collection.
+      nullopt,      // Hold package.
+      nullopt,      // Hold version.
+      {},           // Constraints.
+      system,       // System.
+      false,        // Keep output directory.
+      false,        // Disfigure (from-scratch reconf).
+      false,        // Configure-only.
+      nullopt,      // Checkout root.
+      false,        // Checkout purge.
+      strings (),   // Configuration variables.
+      nullopt,      // Upgrade.
+      false,        // Deorphan.
+      {dpt},        // Required by (dependent).
+      true,         // Required by dependents.
+      0};           // State flags.
+
+    // Add constraints, if present.
+    //
+    {
+      auto i (dsp->prerequisites.find (lsp));
+      assert (i != dsp->prerequisites.end ());
+
+      if (i->second.constraint)
+        p.constraints.emplace_back (*i->second.constraint,
+                                    dpt.db,
+                                    dpt.name,
+                                    *dpt.version,
+                                    true /* selected_dependent */);
+    }
+
+    // Note: not recursive.
+    //
+    collect_build (
+      o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+    return entered_build (dep);
+  }
+
+  // Collect (non-recursively) the specified existing dependent for
+  // re-evaluation (build_reevaluate flag), recording the specified
+  // dependency packages ds in its required-by set. May not be called for a
+  // deviated dependent (which has no dependency member).
+  //
+  void build_packages::
+  collect_existing_dependent (const pkg_build_options& o,
+                              const existing_dependent& ed,
+                              postponed_configuration::packages&& ds,
+                              replaced_versions& replaced_vers,
+                              postponed_configurations& postponed_cfgs,
+                              unsatisfied_dependents& unsatisfied_depts)
+  {
+    assert (ed.dependency); // May not be a deviated existing dependent.
+
+    pair<shared_ptr<available_package>,
+         lazy_shared_ptr<repository_fragment>> rp (
+           find_available_fragment (o, ed.db, ed.selected));
+
+    set<package_version_key> rb;
+
+    for (package_key& p: ds)
+      rb.emplace (p.db, move (p.name), version ());
+
+    build_package p {
+      build_package::build,
+      ed.db,
+      ed.selected,
+      move (rp.first),
+      move (rp.second),
+      nullopt,      // Dependencies.
+      nullopt,      // Dependencies alternatives.
+      nullopt,      // Package skeleton.
+      nullopt,      // Postponed dependency alternatives.
+      false,        // Recursive collection.
+      nullopt,      // Hold package.
+      nullopt,      // Hold version.
+      {},           // Constraints.
+      false,        // System.
+      false,        // Keep output directory.
+      false,        // Disfigure (from-scratch reconf).
+      false,        // Configure-only.
+      nullopt,      // Checkout root.
+      false,        // Checkout purge.
+      strings (),   // Configuration variables.
+      nullopt,      // Upgrade.
+      false,        // Deorphan.
+      move (rb),    // Required by (dependency).
+      false,        // Required by dependents.
+      build_package::build_reevaluate};
+
+    // Note: not recursive.
+    //
+    collect_build (
+      o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+  }
+
+  // Collect (non-recursively) the specified existing dependent for
+  // re-collection (build_recollect flag) and add it to the postponed
+  // re-collections set. A deviated dependent is additionally flagged for
+  // reconfiguration. If add_required_by is true, record the (already
+  // collected) originating dependency in the dependent's required-by set.
+  //
+  void build_packages::
+  recollect_existing_dependent (const pkg_build_options& o,
+                                const existing_dependent& ed,
+                                replaced_versions& replaced_vers,
+                                postponed_packages& postponed_recs,
+                                postponed_configurations& postponed_cfgs,
+                                unsatisfied_dependents& unsatisfied_depts,
+                                bool add_required_by)
+  {
+    pair<shared_ptr<available_package>,
+         lazy_shared_ptr<repository_fragment>> rp (
+           find_available_fragment (o, ed.db, ed.selected));
+
+    uint16_t flags (build_package::build_recollect);
+
+    // Reconfigure the deviated dependents.
+    //
+    if (!ed.dependency)
+      flags |= build_package::adjust_reconfigure;
+
+    set<package_version_key> rb;
+
+    if (add_required_by)
+    {
+      const package_key& pk (ed.originating_dependency);
+
+      assert (entered_build (pk) != nullptr); // Expected to be collected.
+
+      rb.emplace (pk.db, pk.name, version ());
+    }
+
+    build_package p {
+      build_package::build,
+      ed.db,
+      ed.selected,
+      move (rp.first),
+      move (rp.second),
+      nullopt,      // Dependencies.
+      nullopt,      // Dependencies alternatives.
+      nullopt,      // Package skeleton.
+      nullopt,      // Postponed dependency alternatives.
+      false,        // Recursive collection.
+      nullopt,      // Hold package.
+      nullopt,      // Hold version.
+      {},           // Constraints.
+      false,        // System.
+      false,        // Keep output directory.
+      false,        // Disfigure (from-scratch reconf).
+      false,        // Configure-only.
+      nullopt,      // Checkout root.
+      false,        // Checkout purge.
+      strings (),   // Configuration variables.
+      nullopt,      // Upgrade.
+      false,        // Deorphan.
+      move (rb),    // Required by (dependency).
+      false,        // Required by dependents.
+      flags};
+
+    // Note: not recursive.
+    //
+    collect_build (
+      o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+    postponed_recs.insert (entered_build (ed.db, ed.selected->name));
+  }
+
+ build_packages::iterator build_packages::
+ order (database& db,
+ const package_name& name,
+ package_refs& chain,
+ const function<find_database_function>& fdb,
+ bool reorder)
+ {
+ package_map::iterator mi (map_.find (db, name));
+
+ // Every package that we order should have already been collected.
+ //
+ assert (mi != map_.end ());
+
+ build_package& p (mi->second.package);
+
+ assert (p.action); // Can't order just a pre-entered package.
+
+ // Make sure there is no dependency cycle.
+ //
+ package_ref cp {db, name};
+ {
+ auto i (find (chain.begin (), chain.end (), cp));
+
+ if (i != chain.end ())
+ {
+ diag_record dr (fail);
+ dr << "dependency cycle detected involving package " << name << db;
+
+ auto nv = [this] (const package_ref& cp)
+ {
+ auto mi (map_.find (cp.db, cp.name));
+ assert (mi != map_.end ());
+
+ build_package& p (mi->second.package);
+
+ assert (p.action); // See above.
+
+ // We cannot end up with a dependency cycle for actions other than
+ // build since these packages are configured and we would fail on a
+ // previous run while building them.
+ //
+ assert (p.available != nullptr);
+
+ return p.available_name_version_db ();
+ };
+
+ // Note: push_back() can invalidate the iterator.
+ //
+ size_t j (i - chain.begin ());
+
+ for (chain.push_back (cp); j != chain.size () - 1; ++j)
+ dr << info << nv (chain[j]) << " depends on " << nv (chain[j + 1]);
+ }
+ }
+
+ // If this package is already in the list, then that would also mean all
+ // its prerequisites are in the list and we can just return its
+ // position. Unless we want it reordered.
+ //
+ iterator& pos (mi->second.position);
+ if (pos != end ())
+ {
+ if (reorder)
+ erase (pos);
+ else
+ return pos;
+ }
+
+ // Order all the prerequisites of this package and compute the position of
+ // its "earliest" prerequisite -- this is where it will be inserted.
+ //
+ const shared_ptr<selected_package>& sp (p.selected);
+ const shared_ptr<available_package>& ap (p.available);
+
+ bool build (*p.action == build_package::build);
+
+ // Package build must always have the available package associated.
+ //
+ assert (!build || ap != nullptr);
+
+ // Unless this package needs something to be before it, add it to the end
+ // of the list.
+ //
+ iterator i (end ());
+
+ // Figure out if j is before i, in which case set i to j. The goal here is
+ // to find the position of our "earliest" prerequisite.
+ //
+ auto update = [this, &i] (iterator j)
+ {
+ for (iterator k (j); i != j && k != end ();)
+ if (++k == i)
+ i = j;
+ };
+
+ // Similar to collect_build_prerequisites(), we can prune if the package
+ // is already configured, right? While in collect_build_prerequisites() we
+ // didn't need to add prerequisites of such a package, it doesn't mean
+ // that they actually never ended up in the map via another dependency
+ // path. For example, some can be a part of the initial selection. And in
+ // that case we must order things properly.
+ //
+ // Also, if the package we are ordering is not a system one and needs to
+ // be disfigured during the plan execution, then we must order its
+ // (current) dependencies that also need to be disfigured.
+ //
+ // And yet, if the package we are ordering is a repointed dependent, then
+ // we must order not only its unamended and new prerequisites
+ // (prerequisite replacements) but also its replaced prerequisites, which
+ // can also be disfigured.
+ //
+ bool src_conf (sp != nullptr &&
+ sp->state == package_state::configured &&
+ sp->substate != package_substate::system);
+
+ auto disfigure = [] (const build_package& p)
+ {
+ return p.action && (*p.action == build_package::drop || p.reconfigure ());
+ };
+
+ bool order_disfigured (src_conf && disfigure (p));
+
+ chain.push_back (cp);
+
+ // Order the build dependencies.
+ //
+ if (build && !p.system)
+ {
+ // So here we are going to do things differently depending on whether
+ // the package prerequisites builds are collected or not. If they are
+ // not, then the package is being reconfigured and we use its configured
+ // prerequisites list. Otherwise, we use its collected prerequisites
+ // builds.
+ //
+ if (!p.dependencies)
+ {
+ assert (src_conf); // Shouldn't be here otherwise.
+
+        // A repointed dependent always has its prerequisite replacements
+        // collected, so p.dependencies must always be present for them.
+ //
+ assert ((p.flags & build_package::build_repoint) == 0);
+
+ for (const auto& p: sp->prerequisites)
+ {
+ database& db (p.first.database ());
+ const package_name& name (p.first.object_id ());
+
+          // The prerequisites may not necessarily be in the map or have an
+          // action present, but they can never be dropped.
+ //
+ auto i (map_.find (db, name));
+ if (i != map_.end ())
+ {
+ optional<build_package::action_type> a (i->second.package.action);
+
+ assert (!a || *a != build_package::drop); // See above.
+
+ if (a)
+ update (order (db, name, chain, fdb, false /* reorder */));
+ }
+ }
+
+ // We just ordered them among other prerequisites.
+ //
+ order_disfigured = false;
+ }
+ else
+ {
+ // If the package prerequisites builds are collected, then the
+ // resulting dependency list must be complete.
+ //
+ assert (p.dependencies->size () == ap->dependencies.size ());
+
+ // We are iterating in reverse so that when we iterate over the
+ // dependency list (also in reverse), prerequisites will be built in
+ // the order that is as close to the manifest as possible.
+ //
+ for (const dependency_alternatives_ex& das:
+ reverse_iterate (*p.dependencies))
+ {
+ // The specific dependency alternative must already be selected,
+ // unless this is a toolchain build-time dependency or all the
+ // alternatives are disabled in which case the alternatives list
+ // is empty.
+ //
+ if (das.empty ())
+ continue;
+
+ assert (das.size () == 1);
+
+ bool buildtime (das.buildtime);
+
+ for (const dependency& d: das.front ())
+ {
+ const package_name& n (d.name);
+
+            // Use the custom search function to find the dependency's build
+            // configuration. Failing that, search for it recursively.
+ //
+ database* ddb (fdb (db, n, buildtime));
+
+ auto i (ddb != nullptr
+ ? map_.find (*ddb, n)
+ : map_.find_dependency (db, n, buildtime));
+
+ // Note that for the repointed dependent we only order its new and
+ // potentially unamended prerequisites here (see
+ // collect_build_prerequisites() for details). Thus its
+            // (unamended) prerequisites may not necessarily be in the map or
+            // have an action present, but they can never be dropped. Its
+ // replaced prerequisites will be ordered below.
+ //
+ if (i != map_.end ())
+ {
+ optional<build_package::action_type> a (
+ i->second.package.action);
+
+ assert (!a || *a != build_package::drop); // See above.
+
+ if (a)
+ {
+ update (order (i->first.db,
+ n,
+ chain,
+ fdb,
+ false /* reorder */));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Order the dependencies being disfigured.
+ //
+ if (order_disfigured)
+ {
+ for (const auto& p: sp->prerequisites)
+ {
+ database& db (p.first.database ());
+ const package_name& name (p.first.object_id ());
+
+ // The prerequisites may not necessarily be in the map.
+ //
+ auto i (map_.find (db, name));
+
+ // Note that for the repointed dependent we also order its replaced
+ // and potentially new prerequisites here (see above). The latter is
+ // redundant (we may have already ordered them above) but harmless,
+ // since we do not reorder.
+ //
+ if (i != map_.end () && disfigure (i->second.package))
+ update (order (db, name, chain, fdb, false /* reorder */));
+ }
+ }
+
+ chain.pop_back ();
+
+ return pos = insert (i, p);
+ }
+
+ build_packages::package_map::iterator build_packages::package_map::
+ find_dependency (database& db, const package_name& pn, bool buildtime)
+ {
+ iterator r (end ());
+
+ linked_databases ldbs (db.dependency_configs (pn, buildtime));
+
+ for (database& ldb: ldbs)
+ {
+ iterator i (find (ldb, pn));
+ if (i != end ())
+ {
+ if (r == end ())
+ r = i;
+ else
+ fail << "building package " << pn << " in multiple "
+ << "configurations" <<
+ info << r->first.db.get().config_orig <<
+ info << ldb.config_orig <<
+ info << "use --config-* to select package configuration";
+ }
+ }
+
+ return r;
+ }
+}
diff --git a/bpkg/pkg-build-collect.hxx b/bpkg/pkg-build-collect.hxx
new file mode 100644
index 0000000..f84c86f
--- /dev/null
+++ b/bpkg/pkg-build-collect.hxx
@@ -0,0 +1,1882 @@
+// file : bpkg/pkg-build-collect.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_PKG_BUILD_COLLECT_HXX
+#define BPKG_PKG_BUILD_COLLECT_HXX
+
+#include <map>
+#include <set>
+#include <list>
+#include <forward_list>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/diagnostics.hxx>
+
+#include <bpkg/common-options.hxx>
+#include <bpkg/pkg-build-options.hxx>
+
+#include <bpkg/database.hxx>
+#include <bpkg/pkg-configure.hxx> // find_database_function()
+#include <bpkg/package-skeleton.hxx>
+#include <bpkg/system-package-manager.hxx>
+
+namespace bpkg
+{
+  // The current configurations' dependents being "repointed" to prerequisites
+ // in other configurations, together with their replacement flags. The flag
+ // is true for the replacement prerequisites ("new") and false for the
+ // prerequisites being replaced ("old"). The unamended prerequisites have no
+ // entries.
+ //
+ using repointed_dependents =
+ std::map<package_key, std::map<package_key, bool>>;
+
+ // A "dependency-ordered" list of packages and their prerequisites.
+ // That is, every package on the list only possibly depending on the
+ // ones after it. In a nutshell, the usage is as follows: we first
+ // add one or more packages (the "initial selection"; for example, a
+ // list of packages the user wants built). The list then satisfies all
+ // the prerequisites of the packages that were added, recursively. At
+ // the end of this process we have an ordered list of all the packages
+ // that we have to build, from last to first, in order to build our
+ // initial selection.
+ //
+ // This process is split into two phases: satisfaction of all the
+ // dependencies (the collect_build() function) and ordering of the list
+ // (the order() function).
+ //
+ // During the satisfaction phase, we collect all the packages, their
+ // prerequisites (and so on, recursively) in a map trying to satisfy
+ // any version constraints. Specifically, during this step, we may
+ // "upgrade" or "downgrade" a package that is already in a map as a
+ // result of another package depending on it and, for example, requiring
+ // a different version. If that happens, we make sure that the replaced
+ // package version doesn't apply constraints and/or configuration to its
+ // own dependencies anymore and also that its non-shared dependencies are
+ // gone from the map, recursively (see replaced_versions for details).
+ // One notable side-effect of this process is that all the packages in the
+ // map end up in the list.
+ //
+ // Note that we don't try to do exhaustive constraint satisfaction (i.e.,
+ // there is no backtracking). Specifically, if we have two candidate
+ // packages each satisfying a constraint of its dependent package, then if
+ // neither of them satisfy both constraints, then we give up and ask the
+ // user to resolve this manually by explicitly specifying the version that
+ // will satisfy both constraints.
+ //
+ // Also note that we rule out dependency alternatives with enable constraint
+ // that evaluates to false and try to select one satisfactory alternative if
+ // there are multiple of them. In the latter case we pick the first
+ // alternative with packages that are already used (as a result of being
+  // dependencies of other packages, requested by the user, or already being
+ // present in the configuration) and fail if such an alternative doesn't
+ // exist.
+ //
+ struct build_package
+ {
+ enum action_type
+ {
+ // Available package is not NULL.
+ //
+ build,
+
+ // Selected package is not NULL, available package is NULL.
+ //
+ drop,
+
+ // Selected package is not NULL, available package is NULL.
+ //
+ // This is the "only adjustments" action for a selected package.
+ // Adjustment flags (see below) are unhold (the package should be
+ // treated as a dependency) and reconfigure (dependent package that
+ // needs to be reconfigured because its prerequisite is being
+ // up/down-graded or reconfigured).
+ //
+ // Note that this action is "replaceable" with either drop or build
+ // action but in the latter case the adjustments must be copied over.
+ //
+ adjust
+ };
+
+ // An object with an absent action is there to "pre-enter" information
+ // about a package (constraints and flags) in case it is used.
+ //
+ optional<action_type> action;
+
+ reference_wrapper<database> db; // Needs to be move-assignable.
+
+ shared_ptr<selected_package> selected; // NULL if not selected.
+ shared_ptr<available_package> available; // Can be NULL, fake/transient.
+
+ // Can be NULL (orphan) or root. If not NULL, then loaded from the
+ // repository configuration database, which may differ from the
+ // configuration the package is being built in.
+ //
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
+
+ const package_name&
+ name () const
+ {
+ return selected != nullptr ? selected->name : available->id.name;
+ }
+
+ // If we end up collecting the prerequisite builds for this package, then
+ // this member stores copies of the selected dependency alternatives. The
+ // dependency alternatives for toolchain build-time dependencies and for
+ // dependencies which have all the alternatives disabled are represented
+ // as empty dependency alternatives lists. If present, it is parallel to
+ // the available package's dependencies member.
+ //
+ // Initially nullopt. Can be filled partially if the package prerequisite
+ // builds collection is postponed for any reason (see postponed_packages
+ // and postponed_configurations for possible reasons).
+ //
+ optional<bpkg::dependencies> dependencies;
+
+ // Indexes of the selected dependency alternatives stored in the above
+ // dependencies member.
+ //
+ optional<vector<size_t>> alternatives;
+
+ // If we end up collecting the prerequisite builds for this package, then
+ // this member stores the skeleton of the package being built.
+ //
+ // Initially nullopt. Can potentially be loaded but with the reflection
+ // configuration variables collected only partially if the package
+ // prerequisite builds collection is postponed for any reason. Can also be
+ // unloaded if the package has no conditional dependencies.
+ //
+ optional<package_skeleton> skeleton;
+
+ // If the package prerequisite builds collection is postponed, then this
+ // member stores the references to the enabled alternatives (in available
+ // package) of a dependency being the cause of the postponement together
+ // with their original indexes in the respective dependency alternatives
+ // list. This, in particular, allows us not to re-evaluate conditions
+ // multiple times on the re-collection attempts.
+ //
+ // Note: it shouldn't be very common for a dependency to contain more than
+ // two true alternatives.
+ //
+ using dependency_alternatives_refs =
+ small_vector<pair<reference_wrapper<const dependency_alternative>,
+ size_t>,
+ 2>;
+
+ optional<dependency_alternatives_refs> postponed_dependency_alternatives;
+
+ // True if the recursive collection of the package has been started or
+ // performed.
+ //
+ // Used by the dependency configuration negotiation machinery which makes
+ // sure that its configuration is negotiated between dependents before its
+ // recursive collection is started (see postponed_configurations for
+ // details).
+ //
+ // Note that the dependencies member cannot be used for that purpose since
+ // it is not always created (think of a system dependency or an existing
+ // dependency that doesn't need its prerequisites re-collection). In a
+ // sense the recursive collection flag is a barrier for the dependency
+ // configuration negotiation.
+ //
+ bool recursive_collection;
+
+ // Return true if the recursive collection started but has been postponed
+ // for any reason.
+ //
+ bool
+ recursive_collection_postponed () const;
+
+ // Hold flags.
+ //
+ // Note that we only "increase" the hold_package value that is already in
+ // the selected package, unless the adjust_unhold flag is set (see below).
+ //
+ optional<bool> hold_package;
+
+ // Note that it is perfectly valid for the hold_version flag to be false
+ // while the command line constraint is present in the constraints list
+ // (see below). This may happen if the package build is collected by the
+ // unsatisfied dependency constraints resolution logic (see
+ // try_replace_dependency() in pkg-build.cxx for details).
+ //
+ optional<bool> hold_version;
+
+ // Constraint value plus, normally, the dependent package name/version
+ // that placed this constraint but can also be some other name (in which
+ // case the version is absent) for the initial selection. Currently, the
+ // only valid non-package name is 'command line', which is used when the
+ // package version is constrained by the user on the command line.
+ //
+ // Note that if the dependent is a package name, then this package is
+ // expected to be collected (present in the map).
+ //
+ struct constraint_type
+ {
+ version_constraint value;
+
+ package_version_key dependent;
+
+ // False for non-packages. Otherwise, indicates whether the constraint
+ // comes from the selected dependent or not.
+ //
+ bool selected_dependent;
+
+ // Create constraint for a package dependent.
+ //
+ constraint_type (version_constraint v,
+ database& db,
+ package_name nm,
+ version ver,
+ bool s)
+ : value (move (v)),
+ dependent (db, move (nm), move (ver)),
+ selected_dependent (s) {}
+
+ // Create constraint for a non-package dependent.
+ //
+ constraint_type (version_constraint v, database& db, string nm)
+ : value (move (v)),
+ dependent (db, move (nm)),
+ selected_dependent (false) {}
+ };
+
+ vector<constraint_type> constraints;
+
+ // System package indicator. See also a note in the merge() function.
+ //
+ bool system;
+
+ // Return the system/distribution package status if this is a system
+ // package (re-)configuration and the package is being managed by the
+ // system package manager (as opposed to user/fallback). Otherwise, return
+ // NULL (so can be used as bool).
+ //
+ // Note on terminology: We call the bpkg package that is being configured
+ // as available from the system as "system package" and we call the
+ // underlying package managed by the system/distribution package manager
+ // as "system/distribution package". See system-package-manager.hxx for
+ // background.
+ //
+ const system_package_status*
+ system_status () const;
+
+ // As above but only return the status if the package needs to be
+ // installed.
+ //
+ const system_package_status*
+ system_install () const;
+
+ // If this flag is set and the external package is being replaced with an
+ // external one, then keep its output directory between upgrades and
+ // downgrades.
+ //
+ bool keep_out;
+
+ // If this flag is set then disfigure the package between upgrades and
+ // downgrades effectively causing a from-scratch reconfiguration.
+ //
+ bool disfigure;
+
+ // If this flag is set, then don't build this package, only configure.
+ //
+ // Note: use configure_only() to query.
+ //
+ bool configure_only_;
+
+ // If present, then check out the package into the specified directory
+ // rather than into the configuration directory, if it comes from a
+ // version control-based repository. Optionally, remove this directory
+ // when the package is purged.
+ //
+ optional<dir_path> checkout_root;
+ bool checkout_purge;
+
+ // Command line configuration variables. Only meaningful for non-system
+ // packages.
+ //
+ strings config_vars;
+
+ // If present, then the package is requested to be upgraded (true) or
+ // patched (false). Can only be present if the package is already
+ // selected. Can only be false if the selected package version is
+ // patchable. Used by the unsatisfied dependency constraints resolution
+ // logic (see try_replace_dependency() in pkg-build.cxx for details).
+ //
+ optional<bool> upgrade;
+
+ // If true, then this package is requested to be deorphaned. Can only be
+ // true if the package is already selected and is orphaned. Used by the
+ // unsatisfied dependency constraints resolution logic (see
+ // try_replace_dependency() in pkg-build.cxx for details).
+ //
+ bool deorphan;
+
+ // Set of packages (dependents or dependencies but not a mix) that caused
+ // this package to be built or adjusted. The 'command line' name signifies
+ // user selection and can be present regardless of the
+ // required_by_dependents flag value.
+ //
+ // Note that if this is a package name, then this package is expected to
+ // be collected (present in the map), potentially just pre-entered if
+ // required_by_dependents is false. If required_by_dependents is true,
+ // then the packages in the set are all expected to be collected as builds
+ // (action is build, available is not NULL, etc).
+ //
+ // Also note that if required_by_dependents is true, then all the
+ // dependent package versions in the required_by set are expected to be
+ // known (the version members are not empty). Otherwise (the required_by
+ // set contains dependencies), since it's not always easy to deduce the
+ // dependency versions at the time of collecting the dependent build (see
+ // collect_repointed_dependents() implementation for details), the
+ // dependency package versions are expected to all be unknown.
+ //
+ std::set<package_version_key> required_by;
+
+ // If this flag is true, then required_by contains dependents.
+ //
+ // We need this because required_by packages have different semantics for
+ // different actions: the dependent for regular builds and dependency for
+ // adjustments and repointed dependent reconfiguration builds. Mixing them
+ // would break prompts/diagnostics.
+ //
+ bool required_by_dependents;
+
+ // Consider a package as user-selected if it is specified on the command
+ // line, is a held package being upgraded via the `pkg-build -u|-p`
+ // command form, or is a dependency being upgraded via the recursively
+ // upgraded dependent.
+ //
+ bool
+ user_selection () const;
+
+ // Consider a package as user-selected only if it is specified on the
+ // command line as build to hold.
+ //
+ bool
+ user_selection (const vector<build_package>& hold_pkgs) const;
+
+ // Return true if the configured package needs to be recollected
+ // recursively.
+ //
+ // This is required if it is being built as a source package and needs to
+ // be up/down-graded and/or reconfigured and has some buildfile clauses,
+ // it is a repointed dependent, or it is already in the process of being
+ // collected. Also configured dependents can be scheduled for recollection
+ // explicitly (see postponed_packages and build_recollect flag for
+ // details).
+ //
+ bool
+ recollect_recursively (const repointed_dependents&) const;
+
+ // State flags.
+ //
+ uint16_t flags;
+
+ // Set if we also need to clear the hold package flag.
+ //
+ static const uint16_t adjust_unhold = 0x0001;
+
+ bool
+ unhold () const
+ {
+ return (flags & adjust_unhold) != 0;
+ }
+
+ // Set if we also need to reconfigure this package. Note that in some
+ // cases reconfigure is naturally implied. For example, if an already
+ // configured package is being up/down-graded. For such cases we don't
+ // guarantee that the reconfigure flag is set. We only make sure to set it
+ // for cases that would otherwise miss the need for reconfiguration. As a
+ // result, use the reconfigure() predicate which detects both explicit and
+ // implied cases.
+ //
+ // At first, it may seem that this flag is redundant and having the
+ // available package set to NULL is sufficient. But consider the case
+ // where the user asked us to build a package that is already in the
+ // configured state (so all we have to do is pkg-update). Next, add to
+ // this a prerequisite package that is being upgraded. Now our original
+ // package has to be reconfigured. But without this flag we won't know
+ // (available for our package won't be NULL).
+ //
+ static const uint16_t adjust_reconfigure = 0x0002;
+
+ bool
+ reconfigure () const;
+
+ // Set if this build action is for repointing of prerequisite.
+ //
+ static const uint16_t build_repoint = 0x0004;
+
+ // Set if this build action is for re-evaluating of an existing dependent.
+ //
+ static const uint16_t build_reevaluate = 0x0008;
+
+ // Set if this build action is for recursive re-collecting of an existing
+ // dependent due to deviation, detecting merge configuration cycle, etc.
+ //
+ static const uint16_t build_recollect = 0x0010;
+
+ // Set if this build action is for replacing of an existing package due to
+ // deorphaning or rebuilding as an archive or directory.
+ //
+ // Note that to replace a package we need to re-fetch it from an existing
+ // repository fragment, archive, or directory (even if its version doesn't
+ // change).
+ //
+ static const uint16_t build_replace = 0x0020;
+
+ bool
+ replace () const
+ {
+ return (flags & build_replace) != 0;
+ }
+
+ bool
+ configure_only () const;
+
+ // Return true if the resulting package will be configured as external.
+ // Optionally, if the package is external, return its absolute and
+ // normalized source root directory path.
+ //
+ bool
+ external (dir_path* = nullptr) const;
+
+ // If the resulting package will be configured as external, then return
+ // its absolute and normalized source root directory path and nullopt
+ // otherwise.
+ //
+ optional<dir_path>
+ external_dir () const
+ {
+ dir_path r;
+ return external (&r) ? optional<dir_path> (move (r)) : nullopt;
+ }
+
+ const version&
+ available_version () const;
+
+ string
+ available_name_version () const
+ {
+ assert (available != nullptr);
+ return package_string (available->id.name, available_version (), system);
+ }
+
+ string
+ available_name_version_db () const;
+
+ // Merge constraints, required-by package names, hold_* flags, state
+ // flags, and user-specified options/variables.
+ //
+ void
+ merge (build_package&&);
+
+ // Initialize the skeleton of a being built package.
+ //
+ package_skeleton&
+ init_skeleton (const common_options&,
+ bool load_old_dependent_config = true,
+ const shared_ptr<available_package>& override = nullptr);
+ };
+
+ using build_package_list = std::list<reference_wrapper<build_package>>;
+
+ using build_package_refs =
+ small_vector<reference_wrapper<const build_package>, 16>;
+
+ // Packages with postponed prerequisites collection, for one of the
+ // following reasons:
+ //
+ // - Postponed due to the inability to find a dependency version satisfying
+ // the pre-entered constraint from repositories available to this
+ // package. The idea is that this constraint could still be satisfied from
+ // a repository fragment of some other package (that we haven't processed
+ // yet) that also depends on this prerequisite.
+ //
+ // - Postponed due to the inability to choose between two dependency
+ // alternatives, both having dependency packages which are not yet
+ // selected in the configuration nor being built. The idea is that this
+ // ambiguity could still be resolved after some of those dependency
+ // packages get built via some other dependents.
+ //
+ // - Postponed recollection of configured dependents whose dependencies
+ // up/downgrade causes selection of different dependency alternatives.
+ // This, in particular, may end up in resolving different dependency
+ // packages and affect the dependent and dependency configurations.
+ //
+ // - Postponed recollection of configured dependents for resolving merge
+ // configuration cycles and as a fallback for missed re-evaluations due to
+ // the shadow-based configuration clusters merge (see
+ // collect_build_prerequisites() for details).
+ //
+ // For the sake of testing, make sure the order in the set is stable.
+ //
+ struct compare_build_package
+ {
+ bool
+ operator() (const build_package* x, const build_package* y) const
+ {
+ const package_name& nx (x->name ());
+ const package_name& ny (y->name ());
+
+ if (int d = nx.compare (ny))
+ return d < 0;
+
+ return x->db.get () < y->db.get ();
+ }
+ };
+ using postponed_packages = std::set<build_package*, compare_build_package>;
+
+ // Base for exception types that indicate an inability to collect a package
+ // build because it was collected prematurely (version needs to be replaced,
+ // configuration requires further negotiation, etc).
+ //
+ struct scratch_collection
+ {
+ // Only used for tracing.
+ //
+ const char* description;
+ const package_key* package = nullptr; // Could be NULL.
+
+ explicit
+ scratch_collection (const char* d): description (d) {}
+ };
+
+ // Map of dependency packages whose recursive processing should be postponed
+ // because they have dependents with configuration clauses.
+ //
+ // Note that dependents of such a package that don't have any configuration
+ // clauses are processed right away (since the negotiated configuration may
+ // not affect them) while those that do are postponed in the same way as
+ // those with dependency alternatives (see above).
+ //
+ // Note that the latter kind of dependent is what eventually causes
+ // recursive processing of the dependency packages. Which means we must
+ // watch out for bogus entries in this map which we may still end up with
+ // (e.g., because postponement caused cross-talk between dependency
+ // alternatives). Thus we keep flags that indicate whether we have seen each
+ // type of dependent and then just process dependencies that have the first
+ // (without config) but not the second (with config).
+ //
+ // Note that if any of these flags is set to true, then the dependency is
+ // expected to be collected (present in the build_packages's map; see below
+ // for the class definition).
+ //
+ struct postponed_dependency
+ {
+ bool wout_config; // Has dependent without config.
+ bool with_config; // Has dependent with config.
+
+ postponed_dependency (bool woc, bool wic)
+ : wout_config (woc),
+ with_config (wic) {}
+
+ bool
+ bogus () const {return wout_config && !with_config;}
+ };
+
+ class postponed_dependencies: public std::map<package_key,
+ postponed_dependency>
+ {
+ public:
+ bool
+ has_bogus () const
+ {
+ for (const auto& pd: *this)
+ {
+ if (pd.second.bogus ())
+ return true;
+ }
+ return false;
+ }
+
+ // Erase the bogus postponements and throw cancel_postponement, if any.
+ //
+ struct cancel_postponement: scratch_collection
+ {
+ cancel_postponement ()
+ : scratch_collection (
+ "bogus dependency collection postponement cancellation") {}
+ };
+
+ void
+ cancel_bogus (tracer& trace)
+ {
+ bool bogus (false);
+ for (auto i (begin ()); i != end (); )
+ {
+ const postponed_dependency& d (i->second);
+
+ if (d.bogus ())
+ {
+ bogus = true;
+
+ l5 ([&]{trace << "erase bogus postponement " << i->first;});
+
+ i = erase (i);
+ }
+ else
+ ++i;
+ }
+
+ if (bogus)
+ {
+ l5 ([&]{trace << "bogus postponements erased, throwing";});
+ throw cancel_postponement ();
+ }
+ }
+ };
+
+ // Map of the dependencies whose recursive collection is postponed until
+ // their existing dependents re-collection/re-evaluation to the lists of the
+ // respective existing dependents (see collect_build_prerequisites() for
+ // details).
+ //
+ using postponed_existing_dependencies = std::map<package_key,
+ vector<package_key>>;
+
+ // Set of dependency alternatives which were found unacceptable by the
+ // configuration negotiation machinery and need to be ignored on re-
+ // collection.
+ //
+ // Note that while negotiating the dependency alternative configuration for
+ // a dependent it may turn out that the configuration required by other
+ // dependents is not acceptable for this dependent. It can also happen that
+ // this dependent is involved in a negotiation cycle when two dependents
+ // continuously overwrite each other's configuration during re-negotiation.
+ // Both situations end up with the failure, unless the dependent has some
+ // other reused dependency alternative which can be tried instead. In the
+ // latter case, we note the problematic alternative and re-collect from
+ // scratch. On re-collection the unacceptable alternatives are ignored,
+ // similar to the disabled alternatives.
+ //
+ // A dependency alternative found unacceptable, identified by the
+ // dependent package, its version, and the alternative's position in its
+ // manifest.
+ //
+ struct unacceptable_alternative
+ {
+   package_key package;
+   bpkg::version version;
+   pair<size_t, size_t> position; // depends + alternative (1-based)
+
+   unacceptable_alternative (package_key pkg,
+                             bpkg::version ver,
+                             pair<size_t, size_t> pos)
+       : package (move (pkg)), version (move (ver)), position (pos) {}
+
+   // Order by package, then by version, and then by position.
+   //
+   bool
+   operator< (const unacceptable_alternative& rhs) const
+   {
+     if (package != rhs.package)
+       return package < rhs.package;
+
+     int r (version.compare (rhs.version));
+     return r != 0 ? r < 0 : position < rhs.position;
+   }
+ };
+
+ using unacceptable_alternatives = std::set<unacceptable_alternative>;
+
+ // Thrown if a dependency alternative configuration cannot be negotiated
+ // between all the dependents and the collection needs to be restarted
+ // from scratch (see unacceptable_alternatives for details).
+ //
+ struct unaccept_alternative: scratch_collection
+ {
+   unaccept_alternative (): scratch_collection ("unacceptable alternative") {}
+ };
+
+ // Map of packages which need to be re-collected with the different version
+ // and/or system flag or dropped.
+ //
+ // Note that the initial package version may be adjusted to satisfy
+ // constraints of dependents discovered during the packages collection. It
+ // may also be dropped if this is a dependency which turns out to be unused.
+ // However, it may not always be possible to perform such an adjustment
+ // in-place since the intermediate package version could already apply some
+ // constraints and/or configuration to its own dependencies. Thus, we may
+ // need to note the desired package version information and re-collect from
+ // scratch.
+ //
+ // Also note that during re-collection such a desired version may turn out
+ // to not be a final version and the adjustment/re-collection can repeat.
+ //
+ // And yet, it doesn't seem plausible to ever create a replacement for the
+ // drop: replacing one drop with another is meaningless (all drops are the
+ // same) and replacing the package drop with a package version build can
+ // always be handled in-place.
+ //
+ // At first glance, the map entries which have not been used for
+ // replacement during the package collection (bogus entries) are harmless
+ // and can be ignored. However, the dependency configuration negotiation
+ // machinery refers to this map and skips existing dependents with
+ // configuration clause which belong to it (see query_existing_dependents()
+ // for details). Thus, if after collection of packages some bogus entries
+ // are present in the map, then it means that we could have erroneously
+ // skipped some existing dependents because of them and so need to erase
+ // these entries and re-collect.
+ //
+ struct replaced_version
+ {
+   // Desired package version, repository fragment, and system flag.
+   //
+   // Note: the available and repository_fragment pointers are both NULL
+   // if the replacement is a drop.
+   //
+   shared_ptr<available_package> available;
+   lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
+   bool system; // Meaningless for the drop.
+
+   // True if the entry has been inserted or used for the replacement
+   // during the current (re-)collection iteration. Used to keep track of
+   // "bogus" (no longer relevant) entries.
+   //
+   bool replaced;
+
+   // Create replacement with the different version.
+   //
+   replaced_version (shared_ptr<available_package> a,
+                     lazy_shared_ptr<bpkg::repository_fragment> f,
+                     bool s)
+       : available (move (a)),
+         repository_fragment (move (f)),
+         system (s),
+         replaced (true) {}
+
+   // Create replacement with the drop (both pointers above stay NULL).
+   //
+   replaced_version (): system (false), replaced (true) {}
+ };
+
+ class replaced_versions: public std::map<package_key, replaced_version>
+ {
+ public:
+   // Thrown by cancel_bogus() to trigger the re-collection from scratch
+   // after the bogus replacements have been erased.
+   //
+   struct cancel_replacement: scratch_collection
+   {
+     cancel_replacement ()
+         : scratch_collection ("bogus version replacement cancellation") {}
+   };
+
+   // Erase the bogus replacements and, if any were erased and scratch is
+   // true, throw cancel_replacement.
+   //
+   void
+   cancel_bogus (tracer&, bool scratch);
+ };
+
+
+ // Dependents with their unsatisfactory dependencies and the respective
+ // ignored constraints.
+ //
+ // Note that during the collecting of all the explicitly specified packages
+ // and their dependencies for the build, we may discover that a being
+ // up/downgraded dependency doesn't satisfy all the being reconfigured,
+ // up/downgraded, or newly built dependents. Rather than fail immediately in
+ // such a case, we postpone the failure, add the unsatisfied dependents and
+ // their respective constraints to the unsatisfied dependents list, and
+ // continue the collection/ordering in the hope that these problems will be
+ // resolved naturally as a result of the requested recollection from scratch
+ // or execution plan refinement (dependents will also be up/downgraded or
+ // dropped, dependencies will be up/downgraded to a different versions,
+ // etc).
+ //
+ // Also note that after collecting/ordering of all the explicitly specified
+ // packages and their dependencies for the build we also collect/order their
+ // existing dependents for reconfiguration, recursively. It may happen that
+ // some of the up/downgraded dependencies don't satisfy the version
+ // constraints which some of the existing dependents impose on them. Rather
+ // than fail immediately in such a case, we postpone the failure, add this
+ // dependent and the unsatisfactory dependency to the unsatisfied dependents
+ // list, and continue the collection/ordering in the hope that these
+ // problems will be resolved naturally as a result of the execution plan
+ // refinement.
+ //
+ // And yet, if these problems do not resolve naturally, then we still try to
+ // resolve them by finding dependency versions which satisfy all the imposed
+ // constraints.
+ //
+ // Specifically, we cache such unsatisfied dependents/constraints, pretend
+ // that the dependents don't impose them and proceed with the remaining
+ // collecting/ordering, simulating the plan execution, and evaluating the
+ // dependency versions. After that, if scratch_collection exception has not
+ // been thrown, we check if the execution plan is finalized or a further
+ // refinement is required. In the latter case we drop the cache and proceed
+ // with the next iteration of the execution plan refinement which may
+ // resolve these problems naturally. Otherwise, we pick the first collected
+ // unsatisfactory dependency and try to find the best available version,
+ // considering all the constraints imposed by the user (explicit version
+ // constraint, --patch and/or --deorphan options, etc) as well as by its new
+ // and existing dependents. If the search succeeds, we update an existing
+ // package spec or add the new one to the command line and recollect from
+ // the very beginning. Note that we always add a new spec with the
+ // hold_version flag set to false. If the search fails, then, similarly, we
+ // try to find the replacement for some of the dependency's dependents,
+ // recursively. Note that we track the package build replacements and never
+ // repeat a replacement for the same command line state (which we adjust for
+ // each replacement). If no replacement is deduced, then we roll back the
+ // latest command line adjustment and recollect from the very beginning. If
+ // there are no adjustments left to try, then we give up the resolution
+ // search and report the first encountered unsatisfied (and ignored)
+ // dependency constraint and fail.
+ //
+ // Note that while we are trying to pick a dependent replacement for the
+ // subsequent re-collection, we cannot easily detect if the replacement is
+ // satisfied with the currently collected dependencies since that would
+ // effectively require to collect the replacement (select dependency
+ // alternatives, potentially re-negotiate dependency configurations,
+ // etc). Thus, we only verify that the replacement version satisfies its
+ // currently collected dependents. To reduce the number of potential
+ // dependent replacements to consider, we apply the heuristics and only
+ // consider those dependents which have or may have some satisfaction
+ // problems (not satisfied with a collected dependency, apply a dependency
+ // constraint which is incompatible with other dependents, etc; see
+ // try_replace_dependent() for details).
+ //
+ // An ignored dependency constraint together with an available dependency
+ // version which would satisfy it.
+ //
+ struct unsatisfied_constraint
+ {
+   // Note: also contains the unsatisfied dependent information.
+   //
+   build_package::constraint_type constraint;
+
+   // Available package version which satisfies the above constraint.
+   //
+   version available_version;
+   bool available_system; // System flag for the above version.
+ };
+
+ // A dependency version constraint which was ignored for a dependent to
+ // postpone the failure (see unsatisfied_dependents for background).
+ //
+ struct ignored_constraint
+ {
+   package_key dependency;
+   version_constraint constraint;
+
+   // Only specified when the failure is postponed during the collection of
+   // the explicitly specified packages and their dependencies.
+   //
+   vector<unsatisfied_constraint> unsatisfied_constraints;
+   vector<package_key> dependency_chain;
+
+   ignored_constraint (const package_key& d,
+                       const version_constraint& c,
+                       vector<unsatisfied_constraint>&& ucs = {},
+                       vector<package_key>&& dc = {})
+       : dependency (d),
+         constraint (c),
+         unsatisfied_constraints (move (ucs)),
+         dependency_chain (move (dc)) {}
+ };
+
+ // A dependent together with the constraints it imposes on its
+ // dependencies which were ignored (see the introductory comment above
+ // for background).
+ //
+ struct unsatisfied_dependent
+ {
+   package_key dependent;
+   vector<ignored_constraint> ignored_constraints;
+ };
+
+ struct build_packages;
+
+ // List of the dependents whose version constraints on their dependencies
+ // were ignored during the collection.
+ //
+ class unsatisfied_dependents: public vector<unsatisfied_dependent>
+ {
+ public:
+   // Add a dependent together with the ignored dependency constraint and,
+   // potentially, with the unsatisfied constraints and the dependency
+   // chain.
+   //
+   void
+   add (const package_key& dependent,
+        const package_key& dependency,
+        const version_constraint&,
+        vector<unsatisfied_constraint>&& ucs = {},
+        vector<package_key>&& dc = {});
+
+   // Try to find the dependent entry and return NULL if not found.
+   //
+   unsatisfied_dependent*
+   find_dependent (const package_key&);
+
+   // Issue the diagnostics for the first unsatisfied (and ignored)
+   // dependency constraint and throw failed.
+   //
+   [[noreturn]] void
+   diag (const build_packages&);
+ };
+
+ // List of dependency groups whose recursive processing should be postponed
+ // due to dependents with configuration clauses, together with these
+ // dependents (we will call them package clusters).
+ //
+ // The idea is that configuration for the dependencies in the cluster needs
+ // to be negotiated between the dependents in the cluster. Note that at any
+ // given time during collection a dependency can only belong to a single
+ // cluster. For example, the following dependent/dependencies with
+ // configuration clauses:
+ //
+ // foo: depends: libfoo
+ // bar: depends: libfoo
+ // depends: libbar
+ // baz: depends: libbaz
+ //
+ // End up in the following clusters (see string() below for the cluster
+ // representation):
+ //
+ // {foo bar | libfoo->{foo/1,1 bar/1,1}}
+ // {bar | libbar->{bar/2,1}}
+ // {baz | libbaz->{baz/1,1}}
+ //
+ // Or, another example:
+ //
+ // foo: depends: libfoo
+ // bar: depends: libfoo libbar
+ // baz: depends: libbaz
+ //
+ // {foo bar | libfoo->{foo/1,1 bar/1,1} libbar->{bar/1,1}}
+ // {baz | libbaz->{baz/1,1}}
+ //
+ // Note that a dependent can belong to any given non-negotiated cluster with
+ // only one `depends` position. However, if some dependency configuration is
+ // up-negotiated for a dependent, then multiple `depends` positions will
+ // correspond to this dependent in the same cluster. Naturally, such
+ // clusters are always (being) negotiated.
+ //
+ // Note that adding new dependent/dependencies to the postponed
+ // configurations can result in merging some of the existing clusters if the
+ // dependencies being added intersect with multiple clusters. For example,
+ // adding:
+ //
+ // fox: depends: libbar libbaz
+ //
+ // to the clusters in the second example will merge them into a single
+ // cluster:
+ //
+ // {foo bar baz fox | libfoo->{foo/1,1 bar/1,1} libbar->{bar/1,1 fox/1,1}
+ // libbaz->{baz/1,1 fox/1,1}}
+ //
+ // Also note that we keep track of packages which turn out to be
+ // dependencies of existing (configured) dependents with configuration
+ // clauses. The recursive processing of such packages should be postponed
+ // until negotiation between all the existing and new dependents which may
+ // or may not be present.
+ //
+ // A cluster of dependencies whose recursive processing is postponed,
+ // together with the dependents (with configuration clauses) that caused
+ // the postponement (see the introductory comment above for details).
+ //
+ class postponed_configuration
+ {
+ public:
+   // The id of the cluster plus the ids of all the clusters that have been
+   // merged into it directly or as their components.
+   //
+   size_t id;
+   small_vector<size_t, 1> merged_ids;
+
+   using packages = small_vector<package_key, 1>;
+
+   // Dependency packages of a dependent at a specific depends position.
+   //
+   class dependency: public packages
+   {
+   public:
+     pair<size_t, size_t> position; // depends + alternative (1-based)
+
+     // If true, then another dependency alternative is present and that
+     // can potentially be considered instead of this one (see
+     // unacceptable_alternatives for details).
+     //
+     // Initially nullopt for existing dependents until they are
+     // re-evaluated.
+     //
+     optional<bool> has_alternative;
+
+     dependency (const pair<size_t, size_t>& pos,
+                 packages deps,
+                 optional<bool> ha)
+         : packages (move (deps)), position (pos), has_alternative (ha) {}
+   };
+
+   // Information on a dependent of (some of) the cluster's dependencies.
+   //
+   class dependent_info
+   {
+   public:
+     bool existing; // True if this is an existing (configured) dependent.
+     small_vector<dependency, 1> dependencies;
+
+     // Return the dependency at the specified position, if present.
+     //
+     dependency*
+     find_dependency (pair<size_t, size_t> pos);
+
+     void
+     add (dependency&&);
+   };
+
+   using dependents_map = std::map<package_key, dependent_info>;
+
+   dependents_map dependents;
+   packages dependencies;
+
+   // Dependency configuration.
+   //
+   // Note that this container may not yet contain some entries that are
+   // already in the dependencies member above. And it may already contain
+   // entries that are not yet in dependencies due to the
+   // retry_configuration logic.
+   //
+   package_configurations dependency_configurations;
+
+   // Shadow clusters.
+   //
+   // See the collect lambda in collect_build_prerequisites() for details.
+   //
+   using positions = small_vector<pair<size_t, size_t>, 1>;
+   using shadow_dependents_map = std::map<package_key, positions>;
+
+   shadow_dependents_map shadow_cluster;
+
+   // Absent -- not negotiated yet, false -- being negotiated, true -- has
+   // been negotiated.
+   //
+   optional<bool> negotiated;
+
+   // The depth of the negotiating recursion (see collect_build_postponed()
+   // for details).
+   //
+   // Note that non-zero depth for an absent negotiated member indicates
+   // that the cluster is in the existing dependents re-evaluation or
+   // configuration refinement phases.
+   //
+   size_t depth = 0;
+
+   // Add dependencies of a new dependent.
+   //
+   postponed_configuration (size_t i,
+                            package_key&& dependent,
+                            bool existing,
+                            pair<size_t, size_t> position,
+                            packages&& deps,
+                            optional<bool> has_alternative)
+       : id (i)
+   {
+     add (move (dependent),
+          existing,
+          position,
+          move (deps),
+          has_alternative);
+   }
+
+   // Add dependency of an existing dependent.
+   //
+   postponed_configuration (size_t i,
+                            package_key&& dependent,
+                            pair<size_t, size_t> position,
+                            package_key&& dep)
+       : id (i)
+   {
+     add (move (dependent),
+          true /* existing */,
+          position,
+          packages ({move (dep)}),
+          nullopt /* has_alternative */);
+   }
+
+   // Add dependencies of a dependent.
+   //
+   // Note: adds the specified dependencies to the end of the configuration
+   // dependencies list suppressing duplicates.
+   //
+   void
+   add (package_key&& dependent,
+        bool existing,
+        pair<size_t, size_t> position,
+        packages&& deps,
+        optional<bool> has_alternative);
+
+   // Return true if any of the configuration's dependents depend on the
+   // specified package.
+   //
+   bool
+   contains_dependency (const package_key& d) const
+   {
+     return find (dependencies.begin (), dependencies.end (), d) !=
+            dependencies.end ();
+   }
+
+   // Return true if this configuration contains any of the specified
+   // dependencies.
+   //
+   bool
+   contains_dependency (const packages&) const;
+
+   // Return true if this and specified configurations contain any common
+   // dependencies.
+   //
+   bool
+   contains_dependency (const postponed_configuration&) const;
+
+   // Notes:
+   //
+   // - Adds dependencies of the being merged from configuration to the
+   //   end of the current configuration dependencies list suppressing
+   //   duplicates.
+   //
+   // - Doesn't change the negotiate member of this configuration.
+   //
+   void
+   merge (postponed_configuration&&);
+
+   void
+   set_shadow_cluster (postponed_configuration&&);
+
+   bool
+   is_shadow_cluster (const postponed_configuration&);
+
+   bool
+   contains_in_shadow_cluster (package_key dependent,
+                               pair<size_t, size_t> pos) const;
+
+   // Return the postponed configuration string representation in the form:
+   //
+   // {<dependent>[ <dependent>]* | <dependency>[ <dependency>]*}['!'|'?']
+   //
+   // <dependent>  = <package>['^']
+   // <dependency> = <package>->{<dependent>/<position>[ <dependent>/<position>]*}
+   //
+   // The potential trailing '!' or '?' of the configuration representation
+   // indicates that the configuration is negotiated or is being
+   // negotiated, respectively.
+   //
+   // '^' character that may follow a dependent indicates that this is an
+   // existing dependent.
+   //
+   // <position> = <depends-index>','<alternative-index>
+   //
+   // <depends-index> and <alternative-index> are the 1-based serial
+   // numbers of the respective depends value and the dependency
+   // alternative in the dependent's manifest.
+   //
+   // See package_key for details on <package>.
+   //
+   // For example:
+   //
+   // {foo^ bar | libfoo->{foo/2,3 bar/1,1} libbar->{bar/1,1}}!
+   //
+   std::string
+   string () const;
+
+ private:
+   // Add the specified packages to the end of the dependencies list
+   // suppressing duplicates.
+   //
+   void
+   add_dependencies (packages&&);
+
+   void
+   add_dependencies (const packages&);
+ };
+
+ // Print the cluster in its string() representation (primarily for
+ // tracing).
+ //
+ inline ostream&
+ operator<< (ostream& os, const postponed_configuration& c)
+ {
+   return os << c.string ();
+ }
+
+ // Note that we could be adding new/merging existing entries while
+ // processing an entry. Thus we use a list.
+ //
+ class postponed_configurations:
+   public std::forward_list<postponed_configuration>
+ {
+ public:
+   // Return the configuration the dependent is added to (after all the
+   // potential configuration merges, etc).
+   //
+   // Also return in second absent if the merge happened due to the shadow
+   // cluster logic (in which case the cluster was/is being negotiated),
+   // false if any non-negotiated or being negotiated clusters has been
+   // merged in, and true otherwise.
+   //
+   // If some configurations need to be merged and this involves the
+   // (being) negotiated configurations, then merge into the
+   // outermost-depth negotiated configuration (with minimum non-zero
+   // depth).
+   //
+   pair<postponed_configuration&, optional<bool>>
+   add (package_key dependent,
+        bool existing,
+        pair<size_t, size_t> position,
+        postponed_configuration::packages dependencies,
+        optional<bool> has_alternative);
+
+   // Add new postponed configuration cluster with a single dependency of
+   // an existing dependent.
+   //
+   // Note that it's the caller's responsibility to make sure that the
+   // dependency doesn't already belong to any existing cluster.
+   //
+   void
+   add (package_key dependent,
+        pair<size_t, size_t> position,
+        package_key dependency);
+
+   // Find the cluster with the specified id.
+   //
+   postponed_configuration*
+   find (size_t id);
+
+   // Return address of the cluster the dependency belongs to and NULL if
+   // it doesn't belong to any cluster.
+   //
+   const postponed_configuration*
+   find_dependency (const package_key& d) const;
+
+   // Return true if all the configurations have been negotiated.
+   //
+   bool
+   negotiated () const;
+
+   // Translate index to iterator and return the referenced configuration.
+   //
+   postponed_configuration&
+   operator[] (size_t);
+
+   size_t
+   size () const;
+
+ private:
+   size_t next_id_ = 1; // Id to assign to the next created cluster.
+ };
+
+ struct build_packages: build_package_list
+ {
+ build_packages () = default;
+
+ // Copy-constructible and move-assignable (used for snapshotting). Note
+ // that the copy constructor and move-assignment are defined out-of-line
+ // while move-construction and copy-assignment are disabled.
+ //
+ build_packages (const build_packages&);
+
+ build_packages (build_packages&&) = delete;
+
+ build_packages& operator= (const build_packages&) = delete;
+
+ build_packages&
+ operator= (build_packages&&) noexcept (false);
+
+ // Pre-enter a build_package without an action. No entry for this package
+ // may already exist.
+ //
+ void
+ enter (package_name, build_package);
+
+ // Return the package pointer if it is already in the map and NULL
+ // otherwise (so can be used as bool).
+ //
+ build_package*
+ entered_build (database& db, const package_name& name)
+ {
+   auto i (map_.find (db, name));
+
+   if (i == map_.end ())
+     return nullptr;
+
+   return &i->second.package;
+ }
+
+ build_package*
+ entered_build (const package_key& p)
+ {
+   return entered_build (p.db, p.name);
+ }
+
+ const build_package*
+ entered_build (database& db, const package_name& name) const
+ {
+   auto i (map_.find (db, name));
+   return i == map_.end () ? nullptr : &i->second.package;
+ }
+
+ const build_package*
+ entered_build (const package_key& p) const
+ {
+   return entered_build (p.db, p.name);
+ }
+
+ // Return NULL if the dependent in the constraint is not a package name
+ // (command line, etc; see build_package::constraint_type for details).
+ // Otherwise, return the dependent package build which is expected to be
+ // collected.
+ //
+ const build_package*
+ dependent_build (const build_package::constraint_type&) const;
+
+ // Collect the package being built. Return its pointer if this package
+ // version was, in fact, added to the map and NULL if it was already there
+ // and the existing version was preferred or if the package build has been
+ // replaced with the drop. So can be used as bool.
+ //
+ // Consult replaced_vers for an existing version replacement entry and
+ // follow it, if present, potentially collecting the package drop instead.
+ // Ignore the entry if its version doesn't satisfy the specified
+ // dependency constraints or the entry is a package drop and the specified
+ // required-by package names have the "required by dependents" semantics.
+ // In this case it's likely that this replacement will be applied for some
+ // later collect_build() call but can potentially turn out bogus. Note
+ // that a version replacement for a specific package may only be applied
+ // once during the collection iteration.
+ //
+ // Add entry to replaced_vers and throw replace_version if the
+ // existing version needs to be replaced but the new version cannot be
+ // re-collected recursively in-place (see replaced_versions for details).
+ //
+ // Optionally, pass the function which verifies the chosen package
+ // version. It is called before replace_version is potentially thrown or
+ // the recursive collection is performed. The scratch argument is true if
+ // the package version needs to be replaced but in-place replacement is
+ // not possible (see replaced_versions for details).
+ //
+ // Also, in the recursive mode (find database function is not NULL):
+ //
+ // - Use the custom search function to find the package dependency
+ // databases.
+ //
+ // - For the repointed dependents collect the prerequisite replacements
+ // rather than prerequisites being replaced.
+ //
+ // - Call add_priv_cfg_function callback for the created private
+ // configurations.
+ //
+ // Note that postponed_* arguments must all be either specified or not.
+ // The dep_chain argument can be specified in the non-recursive mode (for
+ // the sake of the diagnostics) and must be specified in the recursive
+ // mode.
+ //
+ // Thrown if an existing package version needs to be replaced but cannot
+ // be re-collected recursively in-place (see replaced_versions for
+ // details).
+ //
+ struct replace_version: scratch_collection
+ {
+   replace_version (): scratch_collection ("package version replacement") {}
+ };
+
+ // Callback called for each private configuration created during the
+ // collection.
+ //
+ using add_priv_cfg_function = void (database&, dir_path&&);
+
+ // Callback verifying the chosen package version (see the description
+ // above for details on when it is called and on the scratch argument).
+ //
+ using verify_package_build_function = void (const build_package&,
+                                             bool scratch);
+
+ build_package*
+ collect_build (const pkg_build_options&,
+                build_package,
+                replaced_versions&,
+                postponed_configurations&,
+                unsatisfied_dependents&,
+                build_package_refs* dep_chain = nullptr,
+                const function<find_database_function>& = nullptr,
+                const function<add_priv_cfg_function>& = nullptr,
+                const repointed_dependents* = nullptr,
+                postponed_packages* postponed_repo = nullptr,
+                postponed_packages* postponed_alts = nullptr,
+                postponed_packages* postponed_recs = nullptr,
+                postponed_existing_dependencies* = nullptr,
+                postponed_dependencies* = nullptr,
+                unacceptable_alternatives* = nullptr,
+                const function<verify_package_build_function>& = nullptr);
+
+ // Collect prerequisites of the package being built recursively. Return
+ // nullopt, unless in the pre-reevaluation mode (see below).
+ //
+ // But first "prune" this process if the package we build is a system one
+ // or is already configured, since that would mean all its prerequisites
+ // are configured as well. Note that this is not merely an optimization:
+ // the package could be an orphan in which case the below logic will fail
+ // (no repository fragment in which to search for prerequisites). By
+ // skipping the prerequisite check we are able to gracefully handle
+ // configured orphans.
+ //
+ // There are, however, some cases when we still need to re-collect
+ // prerequisites of a configured package:
+ //
+ // - For the repointed dependent we still need to collect its prerequisite
+ // replacements to make sure its dependency constraints are satisfied.
+ //
+ // - If configuration variables are specified for the dependent which has
+ // any buildfile clauses in the dependencies, then we need to
+ // re-evaluate them. This can result in a different set of dependencies
+ // required by this dependent (due to conditional dependencies, etc)
+ // and, potentially, for its reconfigured existing prerequisites,
+ // recursively.
+ //
+ // - For an existing dependent being re-evaluated to the specific
+ // dependency position (reeval_pos argument is specified and is not
+ // {0,0}).
+ //
+ // - For an existing dependent being pre-reevaluated (reeval_pos argument
+ // is {0,0}).
+ //
+ // - For an existing dependent being re-collected due to the selected
+ // dependency alternatives deviation, etc which may be caused by its
+ // dependency up/downgrade (see postponed_packages and
+ // build_package::build_recollect flag for details).
+ //
+ // Note that for these cases, as it was said above, we can potentially
+ // fail if the dependent is an orphan, but this is exactly what we need to
+ // do in that case, since we won't be able to re-collect its dependencies.
+ //
+ // Only a single true dependency alternative can be selected per function
+ // call, unless we are (pre-)re-evaluating. Such an alternative can only
+ // be selected if its index in the postponed alternatives list is less
+ // than the specified maximum (used by the heuristics that determines in
+ // which order to process packages with alternatives; if 0 is passed, then
+ // no true alternative will be selected).
+ //
+ // The idea here is to postpone the true alternatives selection till the
+ // end of the packages collection and then try to optimize the overall
+ // resulting selection (over all the dependents) by selecting alternatives
+ // with the lower indexes first (see collect_build_postponed() for
+ // details).
+ //
+ // Always postpone recursive collection of dependencies for a dependent
+ // with configuration clauses, recording them together with the dependent
+ // in postponed_cfgs (see postponed_configurations for details). If it
+ // turns out that some dependency of such a dependent has already been
+ // collected via some other dependent without configuration clauses, then
+ // record it in postponed_deps and throw the postpone_dependency
+ // exception. This exception is handled via re-collecting packages from
+ // scratch, but now with the knowledge about premature dependency
+ // collection. If some dependency already belongs to some non or being
+ // negotiated cluster then throw merge_configuration. If some dependencies
+ // have existing dependents with config clauses which have not been
+ // considered for the configuration negotiation yet, then throw
+ // recollect_existing_dependents exception to re-collect these dependents.
+ // If configuration has already been negotiated between some other
+ // dependents, then up-negotiate the configuration and throw
+ // retry_configuration exception so that the configuration refinement can
+ // be performed. See the collect lambda implementation for details on the
+ // configuration refinement machinery.
+ //
+ // If the reeval_pos argument is specified and is not {0,0}, then
+ // re-evaluate the package to the specified position. In this mode perform
+ // the regular dependency alternative selection and non-recursive
+ // dependency collection. When the specified position is reached, postpone
+ // the collection by recording the dependent together with the
+ // dependencies at that position in postponed_cfgs (see
+ // postponed_configurations for details). If the dependent/dependencies
+ // are added to an already negotiated cluster, then throw
+ // merge_configuration, similar to the regular collection mode (see
+ // above). Also check for the merge configuration cycles (see the function
+ // implementation for details) and throw the merge_configuration_cycle
+ // exception if such a cycle is detected.
+ //
+ // If {0,0} is specified as the reeval_pos argument, then perform the
+ // pre-reevaluation of an existing dependent, requested due to the
+ // specific dependency up/down-grade or reconfiguration (must be passed as
+ // the orig_dep; we call it originating dependency). The main purpose of
+ // this read-only mode is to obtain the position of the earliest selected
+ // dependency alternative with the config clause, if any, which the
+ // re-evaluation needs to be performed to and to determine if such a
+ // re-evaluation is optional (see pre_reevaluate_result for the full
+ // information being retrieved). The re-evaluation is considered to be
+ // optional if the existing dependent has no config clause for the
+ // originating dependency and the enable and reflect clauses do not refer
+ // to any of the dependency configuration variables (which can only be
+ // those which the dependent has the configuration clauses for; see the
+ // bpkg manual for details). The thinking here is that such an existing
+ // dependent may not change any configuration it applies to its
+ // dependencies and thus it doesn't call for any negotiations (note: if
+ // there are config clauses for the upgraded originating dependency, then
+ // the potentially different defaults for its config variables may affect
+ // the configuration this dependent applies to its dependencies). Such a
+ // dependent can also be reconfigured without pre-selection of its
+ // dependency alternatives since pkg-configure is capable of doing that on
+ // its own for such a simple case (see pkg_configure_prerequisites() for
+ // details). Also look for any deviation in the dependency alternatives
+ // selection and throw reevaluation_deviated exception if such a deviation
+ // is detected. Return nullopt if no dependency alternative with the
+ // config clause is selected.
+ //
+ // If the package is a dependency of configured dependents and needs to be
+ // reconfigured (being upgraded, has configuration specified, etc), then
+ // do the following for each such dependent prior to collecting its own
+ // prerequisites:
+ //
+ // - If the dependent is not already being built/dropped, expected to be
+ // built/dropped, and doesn't apply constraints which the dependency
+ // doesn't satisfy anymore, then pre-reevaluate the dependent.
+ //
+ // - If the dependency alternative with configuration clause has been
+ // encountered during the pre-reevaluation, then record it in
+ // postponed_cfgs as a single-dependency cluster with an existing
+ // dependent (see postponed_configurations for details). If the index of
+ // the encountered depends clause is equal/less than the index of the
+ // depends clause the dependency belongs to, then postpone the recursive
+ // collection of this dependency assuming that it will be collected
+ // later, during/after its existing dependent re-evaluation.
+ //
+ // - If the dependency alternatives selection has deviated, then record
+ // the dependent in postponed_recs (so that it can be re-collected
+ // later) and postpone recursive collection of this dependency assuming
+ // that it will be collected later, during its existing dependent
+ // re-collection. Also record this dependency in the postponed existing
+ // dependencies map (postponed_existing_dependencies argument). This way
+ // the caller can track if the postponed dependencies have never been
+ // collected recursively (deviations are too large, etc) and handle this
+ // situation (currently just fail).
+ //
+ // If a dependency alternative configuration cannot be negotiated between
+ // all the dependents, then unaccept_alternative can be thrown (see
+ // unacceptable_alternatives for details).
+ //
+ struct postpone_dependency: scratch_collection
+ {
+ package_key package;
+
+ explicit
+ postpone_dependency (package_key p)
+ : scratch_collection ("prematurely collected dependency"),
+ package (move (p))
+ {
+ scratch_collection::package = &package;
+ }
+ };
+
+ struct retry_configuration
+ {
+ size_t depth;
+ package_key dependent;
+ };
+
+ struct merge_configuration
+ {
+ size_t depth;
+ };
+
+ struct merge_configuration_cycle
+ {
+ size_t depth;
+ };
+
+ struct reevaluation_deviated {};
+
+ struct pre_reevaluate_result
+ {
+ using packages = postponed_configuration::packages;
+
+ pair<size_t, size_t> reevaluation_position;
+ packages reevaluation_dependencies;
+ bool reevaluation_optional = true;
+ pair<size_t, size_t> originating_dependency_position;
+ };
+
+ optional<pre_reevaluate_result>
+ collect_build_prerequisites (const pkg_build_options&,
+ build_package&,
+ build_package_refs& dep_chain,
+ const function<find_database_function>&,
+ const function<add_priv_cfg_function>&,
+ const repointed_dependents&,
+ replaced_versions&,
+ postponed_packages* postponed_repo,
+ postponed_packages* postponed_alts,
+ size_t max_alt_index,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies&,
+ postponed_dependencies&,
+ postponed_configurations&,
+ unacceptable_alternatives&,
+ unsatisfied_dependents&,
+ optional<pair<size_t, size_t>> reeval_pos = nullopt,
+ const optional<package_key>& orig_dep = nullopt);
+
+ void
+ collect_build_prerequisites (const pkg_build_options&,
+ database&,
+ const package_name&,
+ const function<find_database_function>&,
+ const function<add_priv_cfg_function>&,
+ const repointed_dependents&,
+ replaced_versions&,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ size_t max_alt_index,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies&,
+ postponed_dependencies&,
+ postponed_configurations&,
+ unacceptable_alternatives&,
+ unsatisfied_dependents&);
+
+ // Collect the repointed dependents and their replaced prerequisites,
+ // recursively.
+ //
+ // If a repointed dependent is already pre-entered or collected with an
+ // action other than adjustment, then just mark it for reconfiguration
+ // unless it is already implied. Otherwise, collect the package build with
+ // the repoint sub-action and reconfigure adjustment flag.
+ //
+ void
+ collect_repointed_dependents (const pkg_build_options&,
+ const repointed_dependents&,
+ replaced_versions&,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies&,
+ postponed_dependencies&,
+ postponed_configurations&,
+ unacceptable_alternatives&,
+ unsatisfied_dependents&,
+ const function<find_database_function>&,
+ const function<add_priv_cfg_function>&);
+
+ // Collect the package being dropped. Noop if the specified package is
+ // already being built and its required-by package names have the
+ // "required by dependents" semantics.
+ //
+ // Add entry to replaced_vers and throw replace_version if the existing
+ // version needs to be dropped but this can't be done in-place (see
+ // replaced_versions for details).
+ //
+ void
+ collect_drop (const pkg_build_options&,
+ database&,
+ shared_ptr<selected_package>,
+ replaced_versions&);
+
+ // Collect the package being unheld.
+ //
+ void
+ collect_unhold (database&, const shared_ptr<selected_package>&);
+
+ void
+ collect_build_postponed (const pkg_build_options&,
+ replaced_versions&,
+ postponed_packages& postponed_repo,
+ postponed_packages& postponed_alts,
+ postponed_packages& postponed_recs,
+ postponed_existing_dependencies&,
+ postponed_dependencies&,
+ postponed_configurations&,
+ strings& postponed_cfgs_history,
+ unacceptable_alternatives&,
+ unsatisfied_dependents&,
+ const function<find_database_function>&,
+ const repointed_dependents&,
+ const function<add_priv_cfg_function>&,
+ postponed_configuration* = nullptr);
+
+ // If a configured package is being up/down-graded or reconfigured then
+ // that means all its configured dependents could be affected and we have
+ // to reconfigure them. This function examines every such package that
+ // is already in the map and collects all its configured dependents. We
+ // also need to make sure the dependents are ok with the up/downgrade. If
+ // some dependency constraints are not satisfied, then cache them and
+ // proceed further as if no problematic constraints are imposed (see
+ // unsatisfied_dependents for details). Return the set of the collected
+ // dependents.
+ //
+ // Should we reconfigure just the direct depends or also include indirect,
+ // recursively? Consider this plausible scenario as an example: We are
+ // upgrading a package to a version that provides an additional API. When
+ // its direct dependent gets reconfigured, it notices this new API and
+ // exposes its own extra functionality that is based on it. Now it would
+ // make sense to let its own dependents (which would be our original
+ // package's indirect ones) to also notice this.
+ //
+ std::set<package_key>
+ collect_dependents (const repointed_dependents&, unsatisfied_dependents&);
+
+ // Order the previously-collected package with the specified name and
+ // configuration returning its position.
+ //
+ // Recursively order the collected package dependencies, failing if a
+ // dependency cycle is detected. If reorder is true, then reorder this
+ // package to be considered as "early" as possible.
+ //
+ iterator
+ order (database&,
+ const package_name&,
+ const function<find_database_function>&,
+ bool reorder = true);
+
+ void
+ clear ();
+
+ void
+ clear_order ();
+
+ // Print all the version constraints (one per line) applied to this
+ // package and its dependents, recursively. The specified package is
+ // expected to be collected (present in the map). Don't print the version
+ // constraints for the same package twice, printing "..." instead. Noop if
+ // there are no constraints for this package.
+ //
+ // Optionally, only print constraints from the selected or being built
+ // dependents (see build_package::constraint_type for details).
+ //
+ void
+ print_constraints (diag_record&,
+ const build_package&,
+ string& indent,
+ std::set<package_key>& printed,
+ optional<bool> selected_dependent = nullopt) const;
+
+ void
+ print_constraints (diag_record&,
+ const package_key&,
+ string& indent,
+ std::set<package_key>& printed,
+ optional<bool> selected_dependent = nullopt) const;
+
+ // Verify that builds ordering is consistent across all the data
+ // structures and the ordering expectations are fulfilled (real build
+ // actions are all ordered, etc).
+ //
+ void
+ verify_ordering () const;
+
+ private:
+ // Return the list of existing dependents that have a configuration clause
+ // for any of the selected alternatives together with the dependencies for
+ // the earliest such alternative and the originating dependency (for
+ // which the function is called) position. Return absent dependency
+ // for those dependents which dependency alternatives selection has
+ // deviated (normally due to the dependency up/downgrade). Skip dependents
+ // which are being built and require recursive recollection or dropped
+ // (present in the map) or expected to be built or dropped (present in
+ // rpt_depts or replaced_vers). Also skip dependents which impose the
+ // version constraint on this dependency and the dependency doesn't
+ // satisfy this constraint. Optionally, skip the existing dependents for
+ // which re-evaluation is considered optional (exclude_optional argument;
+ // see pre-reevaluation mode of collect_build_prerequisites() for
+ // details).
+ //
+ // Note that the originating dependency is expected to be collected
+ // (present in the map).
+ //
+ struct existing_dependent
+ {
+ // Dependent.
+ //
+ reference_wrapper<database> db;
+ shared_ptr<selected_package> selected;
+
+ // Earliest dependency with config clause.
+ //
+ optional<package_key> dependency;
+ pair<size_t, size_t> dependency_position;
+
+ // Originating dependency passed to the function call.
+ //
+ package_key originating_dependency;
+ pair<size_t, size_t> originating_dependency_position;
+ };
+
+ // This exception is thrown by collect_build_prerequisites() and
+ // collect_build_postponed() to resolve different kinds of existing
+ // dependent re-evaluation related cycles by re-collecting the problematic
+ // dependents from scratch.
+ //
+ struct recollect_existing_dependents
+ {
+ size_t depth;
+ vector<existing_dependent> dependents;
+ };
+
+ vector<existing_dependent>
+ query_existing_dependents (
+ tracer&,
+ const pkg_build_options&,
+ database&,
+ const package_name&,
+ bool exclude_optional,
+ const function<find_database_function>&,
+ const repointed_dependents&,
+ const replaced_versions&);
+
+ // Non-recursively collect the dependency of an existing dependent
+ // previously returned by the query_existing_dependents() function call
+ // with the build_package::build_reevaluate flag.
+ //
+ const build_package*
+ collect_existing_dependent_dependency (
+ const pkg_build_options&,
+ const existing_dependent&,
+ replaced_versions&,
+ postponed_configurations&,
+ unsatisfied_dependents&);
+
+ // Non-recursively collect an existing non-deviated dependent previously
+ // returned by the query_existing_dependents() function call for the
+ // subsequent re-evaluation.
+ //
+ void
+ collect_existing_dependent (
+ const pkg_build_options&,
+ const existing_dependent&,
+ postponed_configuration::packages&& dependencies,
+ replaced_versions&,
+ postponed_configurations&,
+ unsatisfied_dependents&);
+
+ // Non-recursively collect an existing dependent previously returned by
+ // the query_existing_dependents() function call with the
+ // build_package::build_recollect flag and add it to the postponed package
+ // recollections list. Also add the build_package::adjust_reconfigure flag
+ // for the deviated dependents (existing_dependent::dependency is absent).
+ //
+ // Note that after this function call the existing dependent may not be
+ // returned as a result by the query_existing_dependents() function
+ // anymore (due to the build_package::build_recollect flag presence).
+ //
+ void
+ recollect_existing_dependent (const pkg_build_options&,
+ const existing_dependent&,
+ replaced_versions&,
+ postponed_packages& postponed_recs,
+ postponed_configurations&,
+ unsatisfied_dependents&,
+ bool add_required_by);
+
+ // Skip the dependents collection for the specified dependency if that has
+ // already been done.
+ //
+ // Note that if this function has already been called for this dependency,
+ // then all its dependents are already in the map and their dependency
+ // constraints have been checked.
+ //
+ void
+ collect_dependents (build_package&,
+ const repointed_dependents&,
+ unsatisfied_dependents&,
+ std::set<const build_package*>& visited_deps,
+ std::set<package_key>& result);
+
+ struct package_ref
+ {
+ database& db;
+ const package_name& name;
+
+ bool
+ operator== (const package_ref&);
+ };
+ using package_refs = small_vector<package_ref, 16>;
+
+ iterator
+ order (database&,
+ const package_name&,
+ package_refs& chain,
+ const function<find_database_function>&,
+ bool reorder);
+
+ private:
+ struct data_type
+ {
+ iterator position; // Note: can be end(), see collect_build().
+ build_package package;
+ };
+
+ class package_map: public std::map<package_key, data_type>
+ {
+ public:
+ using base_type = std::map<package_key, data_type>;
+
+ using base_type::find;
+
+ iterator
+ find (database& db, const package_name& pn)
+ {
+ return find (package_key {db, pn});
+ }
+
+ const_iterator
+ find (database& db, const package_name& pn) const
+ {
+ return find (package_key {db, pn});
+ }
+
+ // Try to find a package build in the dependency configurations (see
+ // database::dependency_configs() for details). Return the end iterator
+ // if no build is found and issue diagnostics and fail if multiple
+ // builds (in multiple configurations) are found.
+ //
+ iterator
+ find_dependency (database&, const package_name&, bool buildtime);
+ };
+ package_map map_;
+ };
+}
+
+#endif // BPKG_PKG_BUILD_COLLECT_HXX
diff --git a/bpkg/pkg-build.cli b/bpkg/pkg-build.cli
index e5a6118..a365082 100644
--- a/bpkg/pkg-build.cli
+++ b/bpkg/pkg-build.cli
@@ -1,7 +1,7 @@
// file : bpkg/pkg-build.cli
// license : MIT; see accompanying LICENSE file
-include <bpkg/configuration.cli>;
+include <bpkg/common.cli>;
"\section=1"
"\name=bpkg-pkg-build"
@@ -95,12 +95,20 @@ namespace bpkg
A package name (<pkg>) can be prefixed with a package scheme
(<scheme>). Currently the only recognized scheme is \cb{sys} which
instructs \cb{pkg-build} to configure the package as available from the
- system rather than building it from source. If the system package version
- (<ver-spec>) is not specified or is '\cb{/*}', then it is considered to
- be unknown but satisfying any version constraint. If specified,
- <ver-spec> may not be a version constraint. If the version is not
- explicitly specified, then at least a stub package must be available from
- one of the repositories.
+ system rather than building it from source.
+
+ The system package version (<ver-spec>) may not be a version constraint
+ but may be the special '\cb{/*}' value, which indicates that the version
+ should be considered unknown but satisfying any version constraint. If
+ unspecified, then \cb{pkg-build} will attempt to query the system package
+ manager for the installed version unless the system package manager is
+ unsupported or this functionality is disabled with \cb{--sys-no-query},
+ in which case the '\cb{/*}' <ver-spec> is assumed. If the system package
+ manager is supported, then the automatic installation of an available
+ package can be requested with the \cb{--sys-install} option. Note that if
+ the version is not explicitly specified, then at least a stub package
+ must be available from one of the repositories unless the
+ \cb{--sys-no-stub} option is specified.
Finally, a package can be specified as either the path to the package
archive (<file>) or to the package directory (<dir>\cb{/}; note that it
@@ -111,10 +119,11 @@ namespace bpkg
Additional configuration variables (<cfg-var>), if any, should be
specified before packages (<pkg-spec>) and should be separated with
\cb{--}. Such variables are effective only when configuring and only for
- packages that were explicitly specified on the command line (they can
- also be specified to only apply to specific packages using the argument
- grouping mechanism discussed below). See \l{bpkg-pkg-configure(1)} for
- more information on configuration variables.
+ packages that were explicitly specified on the command line (unless they
+ are global overrides). They can also be specified to only apply to specific
+ packages using the argument grouping mechanism discussed below. See
+ \l{bpkg-pkg-configure(1)} for more information on configuration
+ variables.
By default a package that is specified explicitly on the command line is
built to \i{hold}: it will not be considered for automatic removal if it
@@ -156,6 +165,17 @@ namespace bpkg
bpkg build libfoo/2.0.0 # upgrade libfoo 2.0.0 to hold,
# also hold version 2.0.0
\
+
+ A package can be built in one of the linked configurations instead of the
+ current (or host/build system module, for build-time dependencies)
+ configuration by specifying one of the \cb{--config-*} options (see
+ \l{bpkg-cfg-create(1)} for background on linked configurations). For
+ example:
+
+ \
+ bpkg build foo { --config-name=alt-host }+ ?bison
+ \
+
"
}
@@ -183,18 +203,44 @@ namespace bpkg
all the constraints."
}
+ bool --deorphan
+ {
+ "Replace orphaned packages with the best matching available package
+ versions which satisfy all the constraints.
+
+ It may happen that a built package no longer has the corresponding
+ package available in the repository it came from (for example, as a
+ result of \l{bpkg-rep-fetch(1)} or \l{bpkg-rep-remove(1)}). Such a
+ package is called an \i{orphan}. Without the \cb{--deorphan} option,
+ upgrading, downgrading, or patching an orphan will leave it unchanged
+ if a more suitable version of the package is not available. If the
+ \cb{--deorphan} option is specified, then an orphan will be replaced
+ with a non-orphan. In this case, if \cb{--upgrade}, \cb{--patch}, or
+ the package version is specified, then the new version is selected
+ accordingly. Otherwise, the closest version to the orphaned version is
+ selected using the following preference order:
+ (1) same version, revision, and iteration,
+ (2) latest iteration of same version and revision,
+ (3) later revision of same version,
+ (4) later patch of same version,
+ (5) later minor of same version,
+ (6) latest available version, including earlier
+ (see \l{bpkg#package-version Package Version} for details)."
+ }
+
bool --immediate|-i
{
- "Also upgrade or patch immediate dependencies."
+ "Also upgrade, patch, or deorphan immediate dependencies."
}
bool --recursive|-r
{
- "Also upgrade or patch all dependencies, recursively."
+ "Also upgrade, patch, or deorphan all dependencies, recursively."
}
// Sometimes we may want to upgrade/patch the package itself but to
- // patch/upgrade its dependencies.
+ // patch/upgrade its dependencies. Also we may want to deorphan
+ // dependencies, potentially upgrading/patching the package itself.
//
bool --upgrade-immediate
{
@@ -206,6 +252,11 @@ namespace bpkg
"Patch immediate dependencies."
}
+ bool --deorphan-immediate
+ {
+ "Deorphan immediate dependencies."
+ }
+
bool --upgrade-recursive
{
"Upgrade all dependencies, recursively."
@@ -216,6 +267,11 @@ namespace bpkg
"Patch all dependencies, recursively."
}
+ bool --deorphan-recursive
+ {
+ "Deorphan all dependencies, recursively."
+ }
+
bool --dependency
{
"Build, upgrade, or downgrade a package as a dependency rather than to
@@ -228,6 +284,12 @@ namespace bpkg
downgrades. Refer to \l{bpkg-pkg-disfigure(1)} for details."
}
+ bool --disfigure
+ {
+ "Disfigure packages between upgrades and downgrades, effectively
+ causing a from-scratch reconfiguration."
+ }
+
dir_path --checkout-root
{
"<dir>",
@@ -243,16 +305,41 @@ namespace bpkg
are purged. Refer to the \cb{--output-purge} option in
\l{bpkg-pkg-checkout(1)} for details."
}
+
+ strings --config-name
+ {
+ "<name>",
+ "Name of the linked configuration to build this package(s) in. By
+ default, the package is built in the current configuration. Repeat
+ this option to specify multiple configurations."
+ }
+
+ vector<uint64_t> --config-id
+ {
+ "<num>",
+ "Numeric id of the linked configuration to build this package(s) in. By
+ default, the package is built in the current configuration. Repeat this
+ option to specify multiple configurations."
+ }
+
+ vector<uuid> --config-uuid
+ {
+ "<uuid>",
+ "UUID of the linked configuration to build this package(s) in. By
+ default, the package is built in the current configuration. Repeat this
+ option to specify multiple configurations."
+ }
};
- class pkg_build_options: configuration_options,
+ class pkg_build_options: common_options,
pkg_build_pkg_options
{
"\h|PKG-BUILD GLOBAL OPTIONS|"
bool --yes|-y
{
- "Assume the answer to all prompts is \cb{yes}."
+ "Assume the answer to all prompts is \cb{yes}. Note that this excludes
+ the system package manager prompts; see \cb{--sys-yes} for details."
}
string --for|-f
@@ -310,6 +397,165 @@ namespace bpkg
specified as part of the build command. Refer to the \cb{--shallow}
option in \l{bpkg-rep-fetch(1)} for details."
}
+
+ strings --mask-repository
+ {
+ "<rep>",
+ "For the duration of the command execution pretend the specified
+ repository was removed as if by performing the \cb{rep-remove}
+ command. The repository can be specified either as a repository name or
+ as a repository location (URL or a directory path). Note that the
+ repository's complement and prerequisite repositories are also
+ considered masked, recursively, unless they are complements and/or
+ prerequisites of other unmasked repositories. Repeat this option to
+ mask multiple repositories."
+ }
+
+ strings --mask-repository-uuid
+ {
+ "<v>",
+ "For the duration of the command execution pretend the specified
+ repository was removed from the specified configuration. Similar to
+ \cb{--mask-repository} but only masks the repository in a single
+ configuration. The option value is a key-value pair in the form:
+
+ \c{\i{config-uuid}\b{=}\i{rep}}
+
+ Repeat this option to mask multiple repositories."
+ }
+
+ bool --no-refinement
+ {
+ "Don't try to refine the configuration by offering to drop any unused
+ dependencies that were potentially left behind on the previous
+ \cb{pkg-build} or \cb{pkg-drop} command execution if the command
+ is otherwise a noop (performs no new package builds, upgrades, etc)."
+ }
+
+ bool --no-move
+ {
+ "Don't move dependency packages between configurations. In this mode the
+ \cb{--config-*} options specify packages' current rather than new
+ locations."
+ }
+
+ uint16_t --noop-exit
+ {
+ "<code>",
+
+ "Exit with the specified error code if the command execution is a noop
+ (performs no new package builds, upgrades, etc)."
+ }
+
+ string --rebuild-checksum
+ {
+ "<sum>",
+
+ "Hash the names, versions, and configurations of all the packages that
+ would be built. If the resulting checksum matches the specified, then
+ exit without building anything (potentially with a special error code
+ specified with the \cb{--noop-exit} option). Otherwise, proceed to
+ build as normal. In both cases, print the resulting checksum to
+ \cb{stdout}."
+ }
+
+ uint16_t --no-private-config
+ {
+ "<code>",
+
+ "If no configuration of a suitable type is linked to build a
+ build-time dependency, instead of automatically creating a private
+ configuration of this type, exit with the specified error code
+ printing to \cb{stdout} the dependency chain starting from the
+ build-time dependency (together with its constraint, if present)
+ and ending with the top-level dependent (together with their
+ configuration directories), one entry per line. For example:
+
+ \
+ yacc ^1.0.0
+ libbar/1.0.0 /path/to/libbar/cfg/
+ libfoo/1.0.0 /path/to/libfoo/cfg/
+ \
+
+ See \l{bpkg-cfg-create(1)} for details on linked configurations."
+ }
+
+ bool --sys-no-query
+ {
+ "Do not query the system package manager for the installed versions of
+ packages specified with the \cb{sys} scheme."
+ }
+
+ bool --sys-install
+ {
+ "Instruct the system package manager to install available versions of
+ packages specified with the \cb{sys} scheme that are not already
+ installed. See also the \cb{--sys-no-fetch}, \cb{--sys-yes}, and
+ \cb{--sys-sudo} options."
+ }
+
+ bool --sys-no-fetch
+ {
+ "Do not fetch the system package manager metadata before querying for
+ available versions of packages specified with the \cb{sys} scheme.
+ This option only makes sense together with \cb{--sys-install}."
+ }
+
+ bool --sys-no-stub
+ {
+ "Do not require a stub for packages specified with the \cb{sys} scheme.
+ Note that this option has effect only if the system package manager
+ interactions are supported and not disabled."
+ }
+
+ bool --sys-yes
+ {
+ "Assume the answer to the system package manager prompts is \cb{yes}.
+ Note that system package manager interactions may break your system
+ and you should normally only use this option on throw-away setups
+ (test virtual machines, etc)."
+ }
+
+ string --sys-sudo = "sudo"
+ {
+ "<prog>",
+
+ "The \cb{sudo} program to use for system package manager interactions
+ that normally require administrative privileges (fetch package
+ metadata, install packages, etc). If unspecified, \cb{sudo} is used
+ by default. Pass empty or the special \cb{false} value to disable the
+ use of the \cb{sudo} program. Note that the \cb{sudo} program is
+ normally only needed if the system package installation is enabled
+ with the \cb{--sys-install} option."
+ }
+
+ string --sys-distribution
+ {
+ "<name>",
+ "Alternative system/distribution package manager to interact with. The
+ valid <name> values are \cb{debian} (Debian and alike, such as Ubuntu,
+ etc) and \cb{fedora} (Fedora and alike, such as RHEL, CentOS, etc).
+ Note that some package managers may only be supported when running on
+ certain host operating systems."
+ }
+
+ string --sys-architecture
+ {
+ "<name>",
+ "Alternative architecture to use when interacting with the system
+ package manager. The valid <name> values are system/distribution
+ package manager-specific. If unspecified, the host architecture
+ is used."
+ }
+
+ dir_paths --directory|-d
+ {
+ "<dir>",
+ "Assume current configuration is in <dir> rather than in the current
+ working directory. Repeat this option to specify multiple current
+ configurations. If multiple configurations are specified, they need not
+ belong to the same linked configuration cluster."
+ }
};
"
diff --git a/bpkg/pkg-build.cxx b/bpkg/pkg-build.cxx
index 082696e..fac79c2 100644
--- a/bpkg/pkg-build.cxx
+++ b/bpkg/pkg-build.cxx
@@ -5,11 +5,11 @@
#include <map>
#include <set>
-#include <list>
-#include <cstring> // strlen()
-#include <iostream> // cout
+#include <cstring> // strlen()
+#include <sstream>
+#include <iostream> // cout
-#include <libbutl/standard-version.mxx>
+#include <libbutl/standard-version.hxx>
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
@@ -20,6 +20,8 @@
#include <bpkg/common-options.hxx>
+#include <bpkg/cfg-link.hxx>
+#include <bpkg/rep-mask.hxx>
#include <bpkg/pkg-purge.hxx>
#include <bpkg/pkg-fetch.hxx>
#include <bpkg/rep-fetch.hxx>
@@ -29,2300 +31,2898 @@
#include <bpkg/pkg-checkout.hxx>
#include <bpkg/pkg-configure.hxx>
#include <bpkg/pkg-disfigure.hxx>
+#include <bpkg/package-query.hxx>
+#include <bpkg/package-skeleton.hxx>
+
#include <bpkg/system-repository.hxx>
+#include <bpkg/system-package-manager.hxx>
+
+#include <bpkg/pkg-build-collect.hxx>
using namespace std;
using namespace butl;
namespace bpkg
{
- // @@ Overall TODO:
- //
- // - Configuration vars (both passed and preserved)
+ // System package manager. Resolved lazily if and when needed. Present NULL
+ // value means no system package manager is available for this host.
//
+ static optional<unique_ptr<system_package_manager>> sys_pkg_mgr;
- // Try to find an available stub package in the imaginary system repository.
- // Such a repository contains stubs corresponding to the system packages
- // specified by the user on the command line with version information
- // (sys:libfoo/1.0, ?sys:libfoo/* but not ?sys:libfoo; the idea is that a
- // real stub won't add any extra information to such a specification so we
- // shouldn't insist on its presence). Semantically this imaginary repository
- // complements all real repositories.
+ // Current configurations as specified with --directory|-d (or the current
+ // working directory if none specified).
//
- static vector<shared_ptr<available_package>> imaginary_stubs;
+ static linked_databases current_configs;
- static shared_ptr<available_package>
- find_imaginary_stub (const package_name& name)
+ static inline bool
+ multi_config ()
{
- auto i (find_if (imaginary_stubs.begin (), imaginary_stubs.end (),
- [&name] (const shared_ptr<available_package>& p)
- {
- return p->id.name == name;
- }));
+ return current_configs.size () != 1;
+ }
- return i != imaginary_stubs.end () ? *i : nullptr;
+ static inline bool
+ current (database& db)
+ {
+ return find (current_configs.begin (), current_configs.end (), db) !=
+ current_configs.end ();
}
- // Try to find packages that optionally satisfy the specified version
- // constraint. Return the list of packages and repository fragments in which
- // each was found or empty list if none were found. Note that a stub
- // satisfies any constraint.
+ // Retrieve the repository fragments for the specified package from its
+ // ultimate dependent configurations and add them to the respective
+ // configuration-associated fragment lists.
//
- static
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- find_available (database& db,
- const package_name& name,
- const optional<version_constraint>& c)
+ // If this package's repository fragment is a root fragment (package is
+ // fetched/unpacked using the existing archive/directory), then also add
+ // this repository fragment to the resulting list assuming that this
+ // package's dependencies can be resolved from this repository fragment or
+ // its complements (user-added repositories) as well.
+ //
+ static void
+ add_dependent_repo_fragments (database& db,
+ const shared_ptr<selected_package>& p,
+ config_repo_fragments& r)
{
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> r;
+ available_package_id id (p->name, p->version);
- for (shared_ptr<available_package> ap:
- pointer_result (query_available (db, name, c)))
+ // Add a repository fragment to the specified list, suppressing duplicates.
+ //
+ auto add = [] (shared_ptr<repository_fragment>&& rf,
+ vector<shared_ptr<repository_fragment>>& rfs)
{
- // An available package should come from at least one fetched
- // repository fragment.
- //
- assert (!ap->locations.empty ());
+ if (find (rfs.begin (), rfs.end (), rf) == rfs.end ())
+ rfs.push_back (move (rf));
+ };
- // All repository fragments the package comes from are equally good, so
- // we pick the first one.
- //
- r.emplace_back (move (ap),
- ap->locations[0].repository_fragment.load ());
- }
+ if (p->repository_fragment.empty ()) // Root repository fragment?
+ add (db.find<repository_fragment> (empty_string), r[db]);
- // Adding a stub from the imaginary system repository to the non-empty
- // results isn't necessary but may end up with a duplicate. That's why we
- // only add it if nothing else is found.
- //
- if (r.empty ())
+ for (database& ddb: dependent_repo_configs (db))
{
- shared_ptr<available_package> ap (find_imaginary_stub (name));
+ shared_ptr<available_package> dap (ddb.find<available_package> (id));
- if (ap != nullptr)
- r.emplace_back (move (ap), nullptr);
- }
+ if (dap != nullptr)
+ {
+ assert (!dap->locations.empty ());
- return r;
+ config_repo_fragments::iterator i (r.find (ddb));
+
+ if (i == r.end ())
+ i = r.insert (ddb,
+ vector<shared_ptr<repository_fragment>> ()).first;
+
+ vector<shared_ptr<repository_fragment>>& rfs (i->second);
+
+ for (const auto& pl: dap->locations)
+ {
+ const lazy_shared_ptr<repository_fragment>& lrf (
+ pl.repository_fragment);
+
+ if (!rep_masked_fragment (lrf))
+ add (lrf.load (), rfs);
+ }
+
+ // Erase the entry from the map if it contains no fragments, which may
+ // happen if all the available package repositories are masked.
+ //
+ if (rfs.empty ())
+ r.erase (i);
+ }
+ }
}
- // As above but only look for packages from the specified list fo repository
- // fragments, their prerequisite repositories, and their complements,
- // recursively (note: recursivity applies to complements, not
- // prerequisites).
+ // Return a patch version constraint for the specified package version if it
+ // is a standard version (~ shortcut). Otherwise, if requested, issue a
+ // warning and return nullopt.
+ //
+ // Note that the function may also issue a warning and return nullopt if the
+ // package minor version reached the limit (see standard-version.cxx for
+ // details).
//
- static
- vector<pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>>
- find_available (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const vector<shared_ptr<repository_fragment>>& rfs,
- bool prereq = true)
+ static optional<version_constraint>
+ patch_constraint (const package_name& nm,
+ const version& pv,
+ bool quiet = false)
{
- // Filter the result based on the repository fragments to which each
- // version belongs.
+ // Note that we don't pass allow_stub flag so the system wildcard version
+ // will (naturally) not be patched.
//
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> r (
- filter (rfs, query_available (db, name, c), prereq));
+ string vs (pv.string ());
+ optional<standard_version> v (parse_standard_version (vs));
- if (r.empty ())
+ if (!v)
{
- shared_ptr<available_package> ap (find_imaginary_stub (name));
+ if (!quiet)
+ warn << "unable to patch " << package_string (nm, pv) <<
+ info << "package is not using semantic/standard version";
- if (ap != nullptr)
- r.emplace_back (move (ap), nullptr);
+ return nullopt;
}
- return r;
- }
-
- // As above but only look for a single package from the specified repository
- // fragment, its prerequisite repositories, and their complements,
- // recursively (note: recursivity applies to complements, not
- // prerequisites). Return the package and the repository fragment in which
- // it was found or NULL for both if not found.
- //
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- find_available_one (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const shared_ptr<repository_fragment>& rf,
- bool prereq = true)
- {
- // Filter the result based on the repository fragment to which each
- // version belongs.
+ try
+ {
+ return version_constraint ('~' + vs);
+ }
+ // Note that the only possible reason for invalid_argument exception to be
+ // thrown is that minor version reached the 99999 limit (see
+ // standard-version.cxx for details).
//
- auto r (filter_one (rf, query_available (db, name, c), prereq));
+ catch (const invalid_argument&)
+ {
+ if (!quiet)
+ warn << "unable to patch " << package_string (nm, pv) <<
+ info << "minor version limit reached";
- if (r.first == nullptr)
- r.first = find_imaginary_stub (name);
+ return nullopt;
+ }
+ }
- return r;
+ static inline optional<version_constraint>
+ patch_constraint (const shared_ptr<selected_package>& sp, bool quiet = false)
+ {
+ return patch_constraint (sp->name, sp->version, quiet);
}
- // As above but look for a single package from a list of repository
- // fragments.
+ // As above but returns a minor version constraint (^ shortcut) instead of
+ // the patch version constraint (~ shortcut).
//
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- find_available_one (database& db,
- const package_name& name,
- const optional<version_constraint>& c,
- const vector<shared_ptr<repository_fragment>>& rfs,
- bool prereq = true)
+ static optional<version_constraint>
+ minor_constraint (const package_name& nm,
+ const version& pv,
+ bool quiet = false)
{
- // Filter the result based on the repository fragments to which each
- // version belongs.
+ // Note that we don't pass allow_stub flag so the system wildcard version
+    // Note that we don't pass allow_stub flag so the system wildcard version
+    // will (naturally) not be upgraded.
- auto r (filter_one (rfs, query_available (db, name, c), prereq));
+ string vs (pv.string ());
+ optional<standard_version> v (parse_standard_version (vs));
- if (r.first == nullptr)
- r.first = find_imaginary_stub (name);
+ if (!v)
+ {
+ if (!quiet)
+ warn << "unable to upgrade " << package_string (nm, pv)
+ << " to latest minor version" <<
+ info << "package is not using semantic/standard version";
- return r;
+ return nullopt;
+ }
+
+ try
+ {
+ return version_constraint ('^' + vs);
+ }
+ // Note that the only possible reason for invalid_argument exception to be
+ // thrown is that major version reached the 99999 limit (see
+ // standard-version.cxx for details).
+ //
+ catch (const invalid_argument&)
+ {
+ if (!quiet)
+ warn << "unable to upgrade " << package_string (nm, pv)
+ << " to latest minor version" <<
+ info << "major version limit reached";
+
+ return nullopt;
+ }
}
- // Create a transient (or fake, if you prefer) available_package object
- // corresponding to the specified selected object. Note that the package
- // locations list is left empty and that the returned repository fragment
- // could be NULL if the package is an orphan.
+ // Return true if the selected package is not configured as system and its
+ // repository fragment is not present in any ultimate dependent
+ // configuration (see dependent_repo_configs() for details) or this exact
+ // version is not available from this repository fragment nor from its
+ // complements. Also return true if the selected package repository fragment
+ // is a root fragment (package is fetched/unpacked using the existing
+ // archive/directory).
//
- // Note also that in our model we assume that make_available() is only
- // called if there is no real available_package. This makes sure that if
- // the package moves (e.g., from testing to stable), then we will be using
- // stable to resolve its dependencies.
+ // Note that the orphan definition here is stronger than in the rest of the
+ // code, since we request the available package to also be present in the
+ // repository fragment and consider packages built as existing archives or
+ // directories as orphans. It feels that such a definition aligns better
+ // with the user expectations about deorphaning.
//
- static pair<shared_ptr<available_package>, shared_ptr<repository_fragment>>
- make_available (const common_options& options,
- const dir_path& c,
- database& db,
- const shared_ptr<selected_package>& sp)
+ static bool
+ orphan_package (database& db, const shared_ptr<selected_package>& sp)
{
- assert (sp != nullptr && sp->state != package_state::broken);
+ assert (sp != nullptr);
if (sp->system ())
- return make_pair (make_shared<available_package> (sp->name, sp->version),
- nullptr);
+ return false;
- // First see if we can find its repository fragment.
- //
- // Note that this is package's "old" repository fragment and there is no
- // guarantee that its dependencies are still resolvable from it. But this
- // is our best chance (we could go nuclear and point all orphans to the
- // root repository fragment but that feels a bit too drastic at the
- // moment).
- //
- shared_ptr<repository_fragment> af (
- db.find<repository_fragment> (
- sp->repository_fragment.canonical_name ()));
+ const string& cn (sp->repository_fragment.canonical_name ());
- // The package is in at least fetched state, which means we should
- // be able to get its manifest.
- //
- const optional<path>& a (sp->archive);
+ if (cn.empty ()) // Root repository fragment?
+ return true;
- package_manifest m (
- sp->state == package_state::fetched
- ? pkg_verify (options,
- a->absolute () ? *a : c / *a,
- true /* ignore_unknown */,
- false /* expand_values */)
- : pkg_verify (sp->effective_src_root (c),
- true /* ignore_unknown */,
- // Copy potentially fixed up version from selected package.
- [&sp] (version& v) {v = sp->version;}));
+ for (database& ddb: dependent_repo_configs (db))
+ {
+ const shared_ptr<repository_fragment> rf (
+ ddb.find<repository_fragment> (cn));
- return make_pair (make_shared<available_package> (move (m)), move (af));
+ if (rf != nullptr && !rep_masked_fragment (ddb, rf))
+ {
+ auto af (
+ find_available_one (sp->name,
+ version_constraint (sp->version),
+ lazy_shared_ptr<repository_fragment> (ddb,
+ move (rf)),
+ false /* prereq */,
+ true /* revision */));
+
+ const shared_ptr<available_package>& ap (af.first);
+
+ if (ap != nullptr && !ap->stub ())
+ return false;
+ }
+ }
+
+ return true;
}
- // Return true if the version constraint represents the wildcard version.
+ // List of dependency packages (specified with ? on the command line).
//
- static inline bool
- wildcard (const version_constraint& vc)
+ // If configuration is not specified for a system dependency package (db is
+ // NULL), then the dependency is assumed to be specified for all current
+ // configurations and their explicitly linked configurations, recursively,
+ // including private configurations that can potentially be created during
+ // this run.
+ //
+ // The selected package is not NULL if the database is not NULL and the
+ // dependency package is present in this database.
+ //
+ struct dependency_package
{
- bool r (vc.min_version && *vc.min_version == wildcard_version);
+ database* db; // Can only be NULL if system.
+ package_name name;
+ optional<version_constraint> constraint; // nullopt if unspecified.
+
+ // Can only be true if constraint is specified.
+ //
+ bool hold_version;
- if (r)
- assert (vc.max_version == vc.min_version);
+ shared_ptr<selected_package> selected;
+ bool system;
+ bool existing; // Build as archive or directory.
- return r;
- }
+ // true -- upgrade, false -- patch.
+ //
+ optional<bool> upgrade; // Only for absent constraint.
+
+ bool deorphan;
+ bool keep_out;
+ bool disfigure;
+ optional<dir_path> checkout_root;
+ bool checkout_purge;
+ strings config_vars; // Only if not system.
+ const system_package_status* system_status; // See struct pkg_arg.
+ };
+ using dependency_packages = vector<dependency_package>;
- // A "dependency-ordered" list of packages and their prerequisites.
- // That is, every package on the list only possibly depending on the
- // ones after it. In a nutshell, the usage is as follows: we first
- // add one or more packages (the "initial selection"; for example, a
- // list of packages the user wants built). The list then satisfies all
- // the prerequisites of the packages that were added, recursively. At
- // the end of this process we have an ordered list of all the packages
- // that we have to build, from last to first, in order to build our
- // initial selection.
+ // Evaluate a dependency package and return a new desired version. If the
+ // result is absent (nullopt), then there are no user expectations regarding
+ // this dependency. If the result is a NULL available_package, then it is
+ // either no longer used and can be dropped, or no changes to the dependency
+ // are necessary. Otherwise, the result is available_package to
+ // upgrade/downgrade to or replace with the same version (deorphan, rebuild
+ // as an existing archive or directory, etc) as well as the repository
+ // fragment it must come from, the system flag, and the database it must be
+ // configured in.
//
- // This process is split into two phases: satisfaction of all the
- // dependencies (the collect_build() function) and ordering of the list
- // (the order() function).
+ // If the dependency is being rebuilt as an existing archive or directory we
+ // may end up with the available package version being the same as the
+ // selected package version. In this case the dependency needs to be
+ // re-fetched/re-unpacked from this archive/directory. Also note that if the
+ // dependency needs to be rebuilt as an existing archive or directory the
+ // caller may need to stash its name/database. This way on the subsequent
+ // call this function may return the "no change" recommendation rather than
+ // the "replace" recommendation.
//
- // During the satisfaction phase, we collect all the packages, their
- // prerequisites (and so on, recursively) in a map trying to satisfy
- // any version constraints. Specifically, during this step, we may
- // "upgrade" or "downgrade" a package that is already in a map as a
- // result of another package depending on it and, for example, requiring
- // a different version. One notable side-effect of this process is that
- // we may end up with a lot more packages in the map (but not in the list)
- // than we will have on the list. This is because some of the prerequisites
- // of "upgraded" or "downgraded" packages may no longer need to be built.
+ // If in the deorphan mode it turns out that the package is not an orphan
+ // and there is no version constraint specified and upgrade/patch is not
+ // requested, then assume that no changes are necessary for the dependency.
+ // Otherwise, if the package version is not constrained and no upgrade/patch
+ // is requested, then pick the version that matches the dependency version
+ // best in the following preference order:
//
- // Note also that we don't try to do exhaustive constraint satisfaction
- // (i.e., there is no backtracking). Specifically, if we have two
- // candidate packages each satisfying a constraint of its dependent
- // package, then if neither of them satisfy both constraints, then we
- // give up and ask the user to resolve this manually by explicitly
- // specifying the version that will satisfy both constraints.
+ // - same version, revision, and iteration
+ // - latest iteration of same version and revision
+ // - later revision of same version
+ // - later patch of same version
+ // - later minor of same version
+ // - latest available version, including earlier
//
- struct build_package
+ // Otherwise, always upgrade/downgrade the orphan or fail if no satisfactory
+  // version is available. Note that in both cases (deorphan and
+ // upgrade/downgrade+deorphan) we may end up with the available package
+ // version being the same as the selected package version. In this case the
+ // dependency needs to be re-fetched from an existing repository. Also note
+ // that if the dependency needs to be deorphaned the caller may need to
+ // cache the original orphan version. This way on the subsequent calls this
+ // function still considers this package as an orphan and uses its original
+ // version to deduce the best match, which may change due, for example, to a
+ // change of the constraining dependents set.
+ //
+ // If the package version that satisfies explicitly specified dependency
+  // version constraint cannot be found in the dependents repositories, then
+ // return the "no changes are necessary" result if ignore_unsatisfiable
+ // argument is true and fail otherwise. The common approach is to pass true
+ // for this argument until the execution plan is finalized, assuming that
+ // the problematic dependency might be dropped.
+ //
+ struct evaluate_result
{
- enum action_type
+ // The system, existing, upgrade, and orphan members are meaningless if
+ // the unused flag is true.
+ //
+ reference_wrapper<database> db;
+ shared_ptr<available_package> available;
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
+ bool unused;
+ bool system;
+ bool existing;
+ optional<bool> upgrade;
+
+ // Original orphan version which needs to be deorphaned. May only be
+ // present for the deorphan mode.
+ //
+ optional<version> orphan;
+ };
+
+ struct dependent_constraint
+ {
+ database& db;
+ shared_ptr<selected_package> package;
+ optional<version_constraint> constraint;
+
+ dependent_constraint (database& d,
+ shared_ptr<selected_package> p,
+ optional<version_constraint> c)
+ : db (d), package (move (p)), constraint (move (c)) {}
+ };
+
+ using dependent_constraints = vector<dependent_constraint>;
+ using deorphaned_dependencies = map<package_key, version>;
+ using existing_dependencies = vector<package_key>;
+
+ static evaluate_result
+ evaluate_dependency (const common_options&,
+ database&,
+ const shared_ptr<selected_package>&,
+ const optional<version_constraint>& desired,
+ bool desired_sys,
+ bool existing,
+ database& desired_db,
+ const shared_ptr<selected_package>& desired_db_sp,
+ optional<bool> upgrade,
+ bool deorphan,
+ bool explicitly,
+ const config_repo_fragments&,
+ const dependent_constraints&,
+ const existing_dependencies&,
+ const deorphaned_dependencies&,
+ const build_packages&,
+ bool ignore_unsatisfiable);
+
+ // If there are no user expectations regarding this dependency, then we give
+ // no up/down-grade/replace recommendation, unless there are no dependents
+ // in which case we recommend to drop the dependency.
+ //
+ // Note that the user expectations are only applied for dependencies that
+ // have dependents in the current configurations.
+ //
+ static optional<evaluate_result>
+ evaluate_dependency (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const dependency_packages& deps,
+ bool no_move,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable)
+ {
+ tracer trace ("evaluate_dependency");
+
+ assert (sp != nullptr && !sp->hold_package);
+
+ const package_name& nm (sp->name);
+
+ auto no_change = [&db] ()
{
- build,
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ false /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ };
- // Selected package is not NULL, available package is NULL.
- //
- drop,
+ // Only search for the user expectations regarding this dependency if it
+ // has dependents in the current configurations, unless --no-move is
+ // specified.
+ //
+ // In the no-move mode consider the user-specified configurations not as a
+ // dependency new location, but as the current location of the dependency
+ // to which the expectations are applied. Note that multiple package specs
+ // for the same dependency in different configurations can be specified on
+ // the command line.
+ //
+ linked_databases cur_dbs;
+ dependency_packages::const_iterator i (deps.end ());
- // Selected package is not NULL, available package is NULL.
- //
- // This is the "only adjustments" action for a selected package.
- // Adjustment flags (see below) are unhold (the package should be
- // treated as a dependency) and reconfigure (dependent package that
- // needs to be reconfigured because its prerequisite is being
- // up/down-graded or reconfigured).
+ if (!no_move)
+ {
+ // Collect the current configurations which contain dependents for this
+ // dependency and assume no expectations if there is none.
//
- // Note that this action is "replaceable" with either drop or build
- // action but in the latter case the adjustments must be copied over.
+ for (database& cdb: current_configs)
+ {
+ if (!query_dependents (cdb, nm, db).empty ())
+ cur_dbs.push_back (cdb);
+ }
+
+ // Search for the user expectations regarding this dependency by
+ // matching the package name and configuration type, if configuration is
+ // specified, preferring entries with configuration specified and fail
+ // if there are multiple candidates.
//
- adjust
- };
+ if (!cur_dbs.empty ())
+ {
+ for (dependency_packages::const_iterator j (deps.begin ());
+ j != deps.end ();
+ ++j)
+ {
+ if (j->name == nm && (j->db == nullptr || j->db->type == db.type))
+ {
+ if (i == deps.end () || i->db == nullptr)
+ {
+ i = j;
+ }
+ else if (j->db != nullptr)
+ {
+ fail << "multiple " << db.type << " configurations specified "
+ << "for dependency package " << nm <<
+ info << i->db->config_orig <<
+ info << j->db->config_orig <<
+ info << "consider using the --no-move option";
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ for (dependency_packages::const_iterator j (deps.begin ());
+ j != deps.end ();
+ ++j)
+ {
+ if (j->name == nm && (j->db == nullptr || *j->db == db))
+ {
+ if (i == deps.end () || i->db == nullptr)
+ i = j;
- // An object with an absent action is there to "pre-enter" information
- // about a package (constraints and flags) in case it is used.
- //
- optional<action_type> action;
+ if (i->db != nullptr)
+ break;
+ }
+ }
+ }
- shared_ptr<selected_package> selected; // NULL if not selected.
- shared_ptr<available_package> available; // Can be NULL, fake/transient.
+ bool user_exp (i != deps.end ());
+ bool copy_dep (user_exp && i->db != nullptr && *i->db != db);
- // Can be NULL (orphan) or root.
+ // Collect the dependents for checking the version constraints, using
+ // their repository fragments for discovering available dependency package
+ // versions, etc.
+ //
+ // Note that if dependency needs to be copied, then we only consider its
+ // dependents in the current configurations which potentially can be
+ // repointed to it. Note that configurations of such dependents must
+ // contain the new dependency configuration in their dependency tree.
//
- shared_ptr<bpkg::repository_fragment> repository_fragment;
+ linked_databases dep_dbs;
- const package_name&
- name () const
+ if (copy_dep)
{
- return selected != nullptr ? selected->name : available->id.name;
+ for (database& db: i->db->dependent_configs ())
+ {
+ if (find (cur_dbs.begin (), cur_dbs.end (), db) != cur_dbs.end ())
+ dep_dbs.push_back (db);
+ }
+
+ // Bail out if no dependents can be repointed to the dependency.
+ //
+ if (dep_dbs.empty ())
+ {
+ l5 ([&]{trace << *sp << db << ": can't repoint";});
+ return no_change ();
+ }
}
+ else
+ dep_dbs = db.dependent_configs ();
- // Hold flags. Note that we only "increase" the hold_package value that is
- // already in the selected package.
+ // Collect the dependents but bail out if the dependency is used but there
+ // are no user expectations regarding it.
//
- optional<bool> hold_package;
- optional<bool> hold_version;
+ vector<pair<database&, package_dependent>> pds;
- // Constraint value plus, normally, the dependent package name that placed
- // this constraint but can also be some other name for the initial
- // selection (e.g., package version specified by the user on the command
- // line). This why we use the string type, rather than package_name.
- //
- struct constraint_type
+ for (database& ddb: dep_dbs)
{
- string dependent;
- version_constraint value;
+ auto ds (query_dependents (ddb, nm, db));
- constraint_type () = default;
- constraint_type (string d, version_constraint v)
- : dependent (move (d)), value (move (v)) {}
- };
+ if (!ds.empty ())
+ {
+ if (!user_exp)
+ return nullopt;
- vector<constraint_type> constraints;
+ for (auto& d: ds)
+ pds.emplace_back (ddb, move (d));
+ }
+ }
- // System package indicator. See also a note in the merge() function.
+ // Bail out if the dependency is unused.
//
- bool system;
+ if (pds.empty ())
+ {
+ l5 ([&]{trace << *sp << db << ": unused";});
- // If the flag is set and the external package is being replaced with an
- // external one, then keep its output directory between upgrades and
- // downgrades.
- //
- bool keep_out;
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ true /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ }
- // If present, then check out the package into the specified directory
- // rather than into the configuration directory, if it comes from a
- // version control-based repository. Optionally, remove this directory
- // when the package is purged.
+ // The requested dependency database, version constraint, and system flag.
//
- optional<dir_path> checkout_root;
- bool checkout_purge;
+ assert (i != deps.end ());
+
+ database& ddb (i->db != nullptr ? *i->db : db);
+ const optional<version_constraint>& dvc (i->constraint); // May be nullopt.
+ bool dsys (i->system);
+ bool existing (i->existing);
+ bool deorphan (i->deorphan);
- // Command line configuration variables. Only meaningful for non-system
- // packages.
+ // The selected package in the desired database which we copy over.
+ //
+ // It is the current dependency package, if we don't copy, and may or may
+ // not exist otherwise.
//
- strings config_vars;
+ shared_ptr<selected_package> dsp (db == ddb
+ ? sp
+ : ddb.find<selected_package> (nm));
+
+ // If a package in the desired database is already selected and matches
+ // the user expectations then no package change is required, unless the
+ // package is also being built as an existing archive or directory or
+ // needs to be deorphaned.
+ //
+ if (dsp != nullptr && dvc)
+ {
+ const version& sv (dsp->version);
+ bool ssys (dsp->system ());
- // Set of package names that caused this package to be built or adjusted.
- // Empty name signifies user selection.
+ if (!existing &&
+ !deorphan &&
+ ssys == dsys &&
+ (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
+ {
+ l5 ([&]{trace << *dsp << ddb << ": unchanged";});
+ return no_change ();
+ }
+ }
+
+ // Build a set of repository fragments the dependent packages come from.
+ // Also cache the dependents and the constraints they apply to this
+ // dependency.
//
- set<package_name> required_by;
+ config_repo_fragments repo_frags;
+ dependent_constraints dpt_constrs;
- bool
- user_selection () const
+ for (auto& pd: pds)
{
- return required_by.find (package_name ()) != required_by.end ();
+ database& ddb (pd.first);
+ package_dependent& dep (pd.second);
+
+ shared_ptr<selected_package> p (ddb.load<selected_package> (dep.name));
+ add_dependent_repo_fragments (ddb, p, repo_frags);
+
+ dpt_constrs.emplace_back (ddb, move (p), move (dep.constraint));
}
- // Adjustment flags.
- //
- uint16_t adjustments;
+ return evaluate_dependency (o,
+ db,
+ sp,
+ dvc,
+ dsys,
+ existing,
+ ddb,
+ dsp,
+ i->upgrade,
+ deorphan,
+ true /* explicitly */,
+ repo_frags,
+ dpt_constrs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable);
+ }
- // Set if we also need to clear the hold package flag.
- //
- static const uint16_t adjust_unhold = 0x0001;
+ struct config_selected_package
+ {
+ database& db;
+ const shared_ptr<selected_package>& package;
+
+ config_selected_package (database& d,
+ const shared_ptr<selected_package>& p)
+ : db (d), package (p) {}
bool
- unhold () const
+ operator== (const config_selected_package& v) const
{
- return (adjustments & adjust_unhold) != 0;
+ return package->name == v.package->name && db == v.db;
}
- // Set if we also need to reconfigure this package. Note that in some
- // cases reconfigure is naturally implied. For example, if an already
- // configured package is being up/down-graded. For such cases we don't
- // guarantee that the reconfigure flag is set. We only make sure to set it
- // for cases that would otherwise miss the need for reconfiguration. As a
- // result, use the reconfigure() predicate which detects both explicit and
- // implied cases.
- //
- // At first, it may seem that this flag is redundant and having the
- // available package set to NULL is sufficient. But consider the case
- // where the user asked us to build a package that is already in the
- // configured state (so all we have to do is pkg-update). Next, add to
- // this a prerequisite package that is being upgraded. Now our original
- // package has to be reconfigured. But without this flag we won't know
- // (available for our package won't be NULL).
- //
- static const uint16_t adjust_reconfigure = 0x0002;
-
bool
- reconfigure () const
+ operator< (const config_selected_package& v) const
{
- assert (action && *action != drop);
-
- return selected != nullptr &&
- selected->state == package_state::configured &&
- ((adjustments & adjust_reconfigure) != 0 ||
- (*action == build &&
- (selected->system () != system ||
- selected->version != available_version () ||
- (!system && !config_vars.empty ()))));
+ int r (package->name.compare (v.package->name));
+ return r != 0 ? (r < 0) : (db < v.db);
}
+ };
- const version&
- available_version () const
- {
- // This should have been diagnosed before creating build_package object.
- //
- assert (available != nullptr &&
- (system
- ? available->system_version () != nullptr
- : !available->stub ()));
+ static evaluate_result
+ evaluate_dependency (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const optional<version_constraint>& dvc,
+ bool dsys,
+ bool existing,
+ database& ddb,
+ const shared_ptr<selected_package>& dsp,
+ optional<bool> upgrade,
+ bool deorphan,
+ bool explicitly,
+ const config_repo_fragments& rfs,
+ const dependent_constraints& dpt_constrs,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable)
+ {
+ tracer trace ("evaluate_dependency");
- return system ? *available->system_version () : available->version;
- }
+ const package_name& nm (sp->name);
- string
- available_name_version () const
+ auto no_change = [&db] ()
{
- assert (available != nullptr);
- return package_string (available->id.name, available_version (), system);
- }
+ return evaluate_result {db,
+ nullptr /* available */,
+ nullptr /* repository_fragment */,
+ false /* unused */,
+ false /* system */,
+ false /* existing */,
+ nullopt /* upgrade */,
+ nullopt /* orphan */};
+ };
- // Merge constraints, required-by package names, hold_* flags,
- // adjustments, and user-specified options/variables.
+ // Build the list of available packages for the potential up/down-grade
+ // to, in the version-descending order. If patching, then we constrain the
+ // choice with the latest patch version and place no constraints if
+ // upgrading. For a system package we will try to find the available
+ // package that matches the user-specified system version (preferable for
+ // the configuration negotiation machinery) and, if fail, fallback to
+ // picking the latest one just to make sure the package is recognized.
//
- void
- merge (build_package&& p)
+ // But first check if this package is specified as an existing archive or
+ // directory. If that's the case, then only consider its (transient)
+ // available package instead of the above.
+ //
+ bool patch (false);
+ available_packages afs;
+
+ if (existing)
{
- // We don't merge into pre-entered objects, and from/into drops.
+ // By definition such a dependency has a version specified and may not
+ // be system.
//
- assert (action && *action != drop && (!p.action || *p.action != drop));
+ assert (dvc && !dsys);
+
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ find_existing (ddb, nm, *dvc));
- // Copy the user-specified options/variables.
+ // Must have been added to the existing packages registry.
//
- if (p.user_selection ())
+ assert (rp.first != nullptr);
+
+ afs.push_back (move (rp));
+ }
+ else
+ {
+ optional<version_constraint> c;
+
+ if (!dvc)
{
- // We don't allow a package specified on the command line multiple
- // times to have different sets of options/variables. Given that, it's
- // tempting to assert that the options/variables don't change if we
- // merge into a user selection. That's, however, not the case due to
- // the iterative plan refinement implementation details (--checkout-*
- // options and variables are only saved into the pre-entered
- // dependencies, etc.).
- //
- if (p.keep_out)
- keep_out = p.keep_out;
+ assert (!dsys); // The version can't be empty for the system package.
- if (p.checkout_root)
- checkout_root = move (p.checkout_root);
+ patch = (upgrade && !*upgrade);
- if (p.checkout_purge)
- checkout_purge = p.checkout_purge;
+ if (patch)
+ {
+ c = patch_constraint (sp, ignore_unsatisfiable);
+
+ if (!c)
+ {
+ l5 ([&]{trace << *sp << db << ": non-patchable";});
+ return no_change ();
+ }
+ }
+ }
+ else if (!dsys || !wildcard (*dvc))
+ c = dvc;
- if (!p.config_vars.empty ())
- config_vars = move (p.config_vars);
+ afs = find_available (nm, c, rfs);
- // Propagate the user-selection tag.
- //
- required_by.insert (package_name ());
+ if (afs.empty () && dsys && c)
+ afs = find_available (nm, nullopt, rfs);
+ }
+
+ // In the deorphan mode check that the dependency is an orphan or was
+ // deorphaned on some previous refinement iteration. If that's not the
+ // case, then just disable the deorphan mode for this dependency and, if
+ // the version is not constrained and upgrade/patch is not requested, bail
+ // out indicating that no change is required.
+ //
+ // Note that in the move mode (dsp != sp) we deorphan the dependency in
+ // its destination configuration, if present. In the worst case scenario
+ // both the source and destination selected packages may need to be
+ // deorphaned since the source selected package may also stay if some
+ // dependents were not repointed to the new dependency (remember that the
+ // move mode is actually a copy mode). We, however, have no easy way to
+ // issue recommendations for both the old and the new dependencies at the
+ // moment. Given that in the common case the old dependency get dropped,
+ // let's keep it simple and do nothing about the old dependency and see
+ // how it goes.
+ //
+ const version* deorphaned (nullptr);
+
+ if (deorphan)
+ {
+ bool orphan (dsp != nullptr && !dsp->system () && !dsys);
+
+ if (orphan)
+ {
+ auto i (deorphaned_deps.find (package_key (ddb, nm)));
+
+ if (i == deorphaned_deps.end ())
+ orphan = orphan_package (ddb, dsp);
+ else
+ deorphaned = &i->second;
}
- // Required-by package names have different semantics for different
- // actions: dependent for builds and prerequisite for adjustment. Mixing
- // them would break prompts/diagnostics, so we copy them only if actions
- // match.
- //
- if (p.action && *p.action == *action)
- required_by.insert (p.required_by.begin (), p.required_by.end ());
+ if (!orphan)
+ {
+ if (!dvc && !upgrade)
+ {
+ l5 ([&]{trace << *sp << db << ": non-orphan";});
+ return no_change ();
+ }
- // Copy constraints.
- //
- // Note that we may duplicate them, but this is harmless.
- //
- constraints.insert (constraints.end (),
- make_move_iterator (p.constraints.begin ()),
- make_move_iterator (p.constraints.end ()));
+ deorphan = false;
+ }
+ }
- // Copy hold_* flags if they are "stronger".
- //
- if (!hold_package || (p.hold_package && *p.hold_package > *hold_package))
- hold_package = p.hold_package;
+ // Go through up/down-grade candidates and pick the first one that
+ // satisfies all the dependents. In the deorphan mode if the package
+ // version is not constrained and upgrade/patch is not requested, then
+ // pick the version that matches the dependency version best (see the
+ // function description for details). Collect (and sort) unsatisfied
+ // dependents per the unsatisfiable version in case we need to print them.
+ //
+ // NOTE: don't forget to update the find_orphan_match() lambda and the
+ // try_replace_dependency() function if changing anything deorphan-related
+ // here.
+ //
+ using sp_set = set<config_selected_package>;
- if (!hold_version || (p.hold_version && *p.hold_version > *hold_version))
- hold_version = p.hold_version;
+ vector<pair<version, sp_set>> unsatisfiable;
- // Copy adjustments flags.
- //
- adjustments |= p.adjustments;
+ bool stub (false);
+
+ assert (!dsys ||
+ (ddb.system_repository &&
+ ddb.system_repository->find (nm) != nullptr));
- // Note that we don't copy the build_package::system flag. If it was
- // set from the command line ("strong system") then we will also have
- // the '==' constraint which means that this build_package object will
- // never be replaced.
+ // Version to deorphan (original orphan version).
+ //
+ const version* dov (deorphaned != nullptr ? deorphaned :
+ deorphan ? &dsp->version :
+ nullptr);
+
+ optional<version_constraint> dopc; // Patch constraint for the above.
+ optional<version_constraint> domc; // Minor constraint for the above.
+
+ bool orphan_best_match (deorphan && !dvc && !upgrade);
+
+ if (orphan_best_match)
+ {
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
//
- // For other cases ("weak system") we don't want to copy system over in
- // order not prevent, for example, system to non-system upgrade.
+ version v (dov->epoch,
+ dov->upstream,
+ dov->release,
+ dov->revision,
+ 0 /* iteration */);
+
+ dopc = patch_constraint (nm, v, true /* quiet */);
+ domc = minor_constraint (nm, v, true /* quiet */);
}
- };
- using build_package_list = list<reference_wrapper<build_package>>;
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
- struct build_packages: build_package_list
- {
- // Packages collection of whose prerequisites has been postponed due the
- // inability to find a version satisfying the pre-entered constraint from
- // repositories available to this package. The idea is that this
- // constraint could still be satisfied from a repository fragment of some
- // other package (that we haven't processed yet) that also depends on this
- // prerequisite.
- //
- using postponed_packages = set<const build_package*>;
+ available deorphan_latest_iteration;
+ available deorphan_later_revision;
+ available deorphan_later_patch;
+ available deorphan_later_minor;
+ available deorphan_latest_available;
- // Pre-enter a build_package without an action. No entry for this package
- // may already exists.
+ // If the dependency is deorphaned to the same version as on the previous
+ // call, then return the "no change" result. Otherwise, return the
+ // deorphan result.
//
- void
- enter (package_name name, build_package pkg)
+ auto deorphan_result = [&sp, &db,
+ &ddb, &dsp,
+ dsys,
+ deorphaned, dov,
+ existing,
+ upgrade,
+ &no_change,
+ &trace] (available&& a, const char* what)
{
- assert (!pkg.action);
+ if (deorphaned != nullptr && dsp->version == a.first->version)
+ {
+ l5 ([&]{trace << *sp << db << ": already deorphaned";});
+ return no_change ();
+ }
- auto p (map_.emplace (move (name), data_type {end (), move (pkg)}));
- assert (p.second);
- }
+ l5 ([&]{trace << *sp << db << ": deorphan to " << what << ' '
+ << package_string (sp->name, a.first->version)
+ << ddb;});
- // Collect the package being built. Return its pointer if this package
- // version was, in fact, added to the map and NULL if it was already there
- // or the existing version was preferred. So can be used as bool.
- //
- build_package*
- collect_build (const common_options& options,
- const dir_path& cd,
- database& db,
- build_package pkg,
- postponed_packages* recursively = nullptr)
+ return evaluate_result {
+ ddb, move (a.first), move (a.second),
+ false /* unused */,
+ dsys,
+ existing,
+ upgrade,
+ *dov};
+ };
+
+ auto build_result = [&ddb, dsys, existing, upgrade] (available&& a)
{
- using std::swap; // ...and not list::swap().
+ return evaluate_result {
+ ddb, move (a.first), move (a.second),
+ false /* unused */,
+ dsys,
+ existing,
+ upgrade,
+ nullopt /* orphan */};
+ };
- tracer trace ("collect_build");
+ // Note that if the selected dependency is the best that we can get, we
+ // normally issue the "no change" recommendation. However, if the
+ // configuration variables are specified for this dependency on the
+ // command line, then we issue the "reconfigure" recommendation instead.
+ //
+ // Return true, if the already selected dependency has been specified on
+ // the command line with the configuration variables, but has not yet been
+ // built on this pkg-build run.
+ //
+ auto reconfigure = [&ddb, &dsp, &nm, dsys, &pkgs] ()
+ {
+ assert (dsp != nullptr);
- // Only builds are allowed here.
- //
- assert (pkg.action && *pkg.action == build_package::build &&
- pkg.available != nullptr);
+ if (!dsys)
+ {
+ const build_package* p (pkgs.entered_build (ddb, nm));
+ return p != nullptr && !p->action && !p->config_vars.empty ();
+ }
+ else
+ return false;
+ };
- auto i (map_.find (pkg.available->id.name));
+ for (available& af: afs)
+ {
+ shared_ptr<available_package>& ap (af.first);
+ const version& av (!dsys ? ap->version : *ap->system_version (ddb));
- // If we already have an entry for this package name, then we
- // have to pick one over the other.
+ // If we aim to upgrade to the latest version and it tends to be less
+ // then the selected one, then what we currently have is the best that
+ // we can get, and so we return the "no change" result, unless we are
+ // deorphaning.
//
- // If the existing entry is a pre-entered or is non-build one, then we
- // merge it into the new build entry. Otherwise (both are builds), we
- // pick one and merge the other into it.
+ // Note that we also handle a package stub here.
//
- if (i != map_.end ())
+ if (!dvc && dsp != nullptr && av < dsp->version)
{
- build_package& bp (i->second.package);
+ assert (!dsys); // Version can't be empty for the system package.
- // Can't think of the scenario when this happens. We would start
- // collecting from scratch (see below).
+ // For the selected system package we still need to pick a source
+ // package version to downgrade to.
//
- assert (!bp.action || *bp.action != build_package::drop);
+ if (!dsp->system () && !deorphan)
+ {
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure (best)";});
+ return build_result (find_available_fragment (o, ddb, dsp));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": best";});
+ return no_change ();
+ }
+ }
- if (!bp.action || *bp.action != build_package::build) // Non-build.
+ // We can not upgrade the package to a stub version, so just skip it.
+ //
+ if (ap->stub ())
{
- pkg.merge (move (bp));
- bp = move (pkg);
+ stub = true;
+ continue;
}
- else // Build.
+ }
+
+ // Check if the version satisfies all the dependents and collect
+ // unsatisfied ones.
+ //
+ bool satisfactory (true);
+ sp_set unsatisfied_dependents;
+
+ for (const auto& dp: dpt_constrs)
+ {
+ if (!satisfies (av, dp.constraint))
{
- // At the end we want p1 to point to the object that we keep
- // and p2 to the object that we merge from.
+ satisfactory = false;
+
+ // Continue to collect dependents of the unsatisfiable version if
+ // we need to print them before failing.
//
- build_package* p1 (&bp);
- build_package* p2 (&pkg);
+ if (ignore_unsatisfiable)
+ break;
- if (p1->available_version () != p2->available_version ())
- {
- using constraint_type = build_package::constraint_type;
+ unsatisfied_dependents.emplace (dp.db, dp.package);
+ }
+ }
- // If the versions differ, we have to pick one. Start with the
- // newest version since if both satisfy, then that's the one we
- // should prefer. So get the first to try into p1 and the second
- // to try -- into p2.
- //
- if (p2->available_version () > p1->available_version ())
- swap (p1, p2);
+ if (!satisfactory)
+ {
+ if (!ignore_unsatisfiable)
+ unsatisfiable.emplace_back (av, move (unsatisfied_dependents));
- // See if pv's version satisfies pc's constraints. Return the
- // pointer to the unsatisfied constraint or NULL if all are
- // satisfied.
- //
- auto test = [] (build_package* pv,
- build_package* pc) -> const constraint_type*
- {
- for (const constraint_type& c: pc->constraints)
- {
- if (!satisfies (pv->available_version (), c.value))
- return &c;
- }
+ // If the dependency is expected to be configured as system, then bail
+ // out, as an available package version will always resolve to the
+ // system one (see above).
+ //
+ if (dsys)
+ break;
- return nullptr;
- };
+ continue;
+ }
- // First see if p1 satisfies p2's constraints.
- //
- if (auto c2 = test (p1, p2))
- {
- // If not, try the other way around.
- //
- if (auto c1 = test (p2, p1))
- {
- const package_name& n (i->first);
- const string& d1 (c1->dependent);
- const string& d2 (c2->dependent);
-
- fail << "unable to satisfy constraints on package " << n <<
- info << d1 << " depends on (" << n << " " << c1->value
- << ")" <<
- info << d2 << " depends on (" << n << " " << c2->value
- << ")" <<
- info << "available " << p1->available_name_version () <<
- info << "available " << p2->available_name_version () <<
- info << "explicitly specify " << n << " version to manually "
- << "satisfy both constraints";
- }
- else
- swap (p1, p2);
- }
+ if (orphan_best_match)
+ {
+ // If the exact orphan version is encountered, then we are done.
+ //
+ if (av == *dov)
+ return deorphan_result (move (af), "exactly same version");
- l4 ([&]{trace << "pick " << p1->available_name_version ()
- << " over " << p2->available_name_version ();});
- }
- // If versions are the same, then we still need to pick the entry as
- // one of them can build a package from source while another
- // configure a system package. We prefer a user-selected entry (if
- // there is one). If none of them is user-selected we prefer a
- // source package over a system one.
- //
- else if (p2->user_selection () ||
- (!p1->user_selection () && !p2->system))
- swap (p1, p2);
-
- // See if we are replacing the object. If not, then we don't
- // need to collect its prerequisites since that should have
- // already been done. Remember, p1 points to the object we
- // want to keep.
- //
- bool replace (p1 != &i->second.package);
+ // If the available package is of the same revision as orphan but a
+ // different iteration, then save it as the latest iteration of same
+ // orphan version and revision.
+ //
+ if (deorphan_latest_iteration.first == nullptr &&
+ av.compare (*dov, false /* revision */, true /* iteration */) == 0)
+ deorphan_latest_iteration = af;
- if (replace)
- {
- swap (*p1, *p2);
- swap (p1, p2); // Setup for merge below.
- }
+ // If the available package is of the same version as orphan and its
+ // revision is greater, then save it as the later revision of same
+ // version.
+ //
+ if (deorphan_later_revision.first == nullptr &&
+ av.compare (*dov, true /* revision */) == 0 &&
+ av.compare (*dov, false /* revision */, true /* iteration */) > 0)
+ deorphan_later_revision = af;
+
+ // If the available package is of the same minor version as orphan but
+ // of the greater patch version, then save it as the later patch of
+ // same version.
+ //
+ if (deorphan_later_patch.first == nullptr &&
+ dopc && satisfies (av, *dopc) &&
+ av.compare (*dov, true /* revision */) > 0) // Patch is greater?
+ deorphan_later_patch = af;
+
+ // If the available package is of the same major version as orphan but
+ // of the greater minor version, then save it as the later minor of
+ // same version.
+ //
+ // Note that we can't easily compare minor versions here since these
+ // are bpkg version objects. Thus, we consider that this is a greater
+ // minor version if the version is greater (ignoring revisions) and
+ // the latest patch is not yet saved.
+ //
+ if (deorphan_later_minor.first == nullptr &&
+ domc && satisfies (av, *domc) &&
+ av.compare (*dov, true /* revision */) > 0 &&
+ deorphan_later_patch.first == nullptr)
+ deorphan_later_minor = af;
- p1->merge (move (*p2));
+ // Save the latest available package version.
+ //
+ if (deorphan_latest_available.first == nullptr)
+ deorphan_latest_available = move (af);
- if (!replace)
- return nullptr;
+ // If the available package version is less then the orphan revision
+ // then we can bail out from the loop, since all the versions from the
+ // preference list have already been encountered, if present.
+ //
+ if (av.compare (*dov, false /* revision */, true /* iteration */) < 0)
+ {
+ assert (deorphan_latest_iteration.first != nullptr ||
+ deorphan_later_revision.first != nullptr ||
+ deorphan_later_patch.first != nullptr ||
+ deorphan_later_minor.first != nullptr ||
+ deorphan_latest_available.first != nullptr);
+ break;
}
}
else
{
- // This is the first time we are adding this package name to the map.
+ // In the up/downgrade+deorphan mode always replace the dependency,
+ // re-fetching it from an existing repository if the version stays the
+ // same.
//
- l4 ([&]{trace << "add " << pkg.available_name_version ();});
+ if (deorphan)
+ return deorphan_result (move (af), "constrained version");
- // Note: copy; see emplace() below.
+ // For the regular up/downgrade if the best satisfactory version and
+ // the desired system flag perfectly match the ones of the selected
+ // package, then no package change is required. Otherwise, recommend
+ // an upgrade/downgrade/replacement.
+ //
+ // Note: we need to be careful here not to start yo-yo'ing for a
+ // dependency being built as an existing archive or directory. For
+ // such a dependency we need to return the "no change" recommendation
+ // if any version recommendation (which may not change) has already
+ // been returned.
//
- package_name n (pkg.available->id.name);
- i = map_.emplace (move (n), data_type {end (), move (pkg)}).first;
+ if (dsp != nullptr &&
+ av == dsp->version &&
+ dsp->system () == dsys &&
+ (!existing ||
+ find (existing_deps.begin (), existing_deps.end (),
+ package_key (ddb, nm)) != existing_deps.end ()))
+ {
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure";});
+ return build_result (move (af));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": unchanged";});
+ return no_change ();
+ }
+ }
+ else
+ {
+ l5 ([&]{trace << *sp << db << ": update to "
+ << package_string (nm, av, dsys) << ddb;});
+
+ return build_result (move (af));
+ }
}
+ }
+
+ if (orphan_best_match)
+ {
+ if (deorphan_latest_iteration.first != nullptr)
+ return deorphan_result (move (deorphan_latest_iteration),
+ "latest iteration");
- build_package& p (i->second.package);
+ if (deorphan_later_revision.first != nullptr)
+ return deorphan_result (move (deorphan_later_revision),
+ "later revision");
- // Recursively collect build prerequisites, if requested.
- //
- // Note that detecting dependency cycles during the satisfaction phase
- // would be premature since they may not be present in the final package
- // list. Instead we check for them during the ordering phase.
- //
- // The question, of course, is whether we can still end up with an
- // infinite recursion here? Note that for an existing map entry we only
- // recurse after the entry replacement. The infinite recursion would
- // mean that we may replace a package in the map with the same version
- // multiple times:
- //
- // ... p1 -> p2 -> ... p1
- //
- // Every replacement increases the entry version and/or tightens the
- // constraints the next replacement will need to satisfy. It feels
- // impossible that a package version can "return" into the map being
- // replaced once. So let's wait until some real use case proves this
- // reasoning wrong.
- //
- if (recursively != nullptr)
- collect_build_prerequisites (options, cd, db, p, recursively);
+ if (deorphan_later_patch.first != nullptr)
+ return deorphan_result (move (deorphan_later_patch), "later patch");
- return &p;
+ if (deorphan_later_minor.first != nullptr)
+ return deorphan_result (move (deorphan_later_minor), "later minor");
+
+ if (deorphan_latest_available.first != nullptr)
+ return deorphan_result (move (deorphan_latest_available),
+ "latest available");
}
- // Collect prerequisites of the package being built recursively. But first
- // "prune" this process if the package we build is a system one or is
- // already configured since that would mean all its prerequisites are
- // configured as well. Note that this is not merely an optimization: the
- // package could be an orphan in which case the below logic will fail (no
- // repository fragment in which to search for prerequisites). By skipping
- // the prerequisite check we are able to gracefully handle configured
- // orphans.
+ // If we aim to upgrade to the latest version, then what we currently have
+ // is the only thing that we can get, and so returning the "no change"
+ // result, unless we need to upgrade a package configured as system or to
+ // deorphan.
//
- void
- collect_build_prerequisites (const common_options& options,
- const dir_path& cd,
- database& db,
- const build_package& pkg,
- postponed_packages* postponed)
+ if (!dvc && dsp != nullptr && !dsp->system () && !deorphan)
{
- tracer trace ("collect_build_prerequisites");
-
- assert (pkg.action && *pkg.action == build_package::build);
+ assert (!dsys); // Version cannot be empty for the system package.
- const shared_ptr<selected_package>& sp (pkg.selected);
+ if (reconfigure ())
+ {
+ l5 ([&]{trace << *dsp << ddb << ": reconfigure (only)";});
+ return build_result (find_available_fragment (o, ddb, dsp));
+ }
+ else
+ {
+ l5 ([&]{trace << *dsp << ddb << ": only";});
+ return no_change ();
+ }
+ }
- if (pkg.system ||
- (sp != nullptr &&
- sp->state == package_state::configured &&
- sp->substate != package_substate::system &&
- sp->version == pkg.available_version ()))
- return;
+ // If the version satisfying the desired dependency version constraint is
+ // unavailable or unsatisfiable for some dependents then we fail, unless
+ // requested not to do so. In the latter case we return the "no change"
+ // result.
+ //
+ if (ignore_unsatisfiable)
+ {
+ l5 ([&]{trace << package_string (nm, dvc, dsys) << ddb
+ << (unsatisfiable.empty ()
+ ? ": no source"
+ : ": unsatisfiable");});
- // Show how we got here if things go wrong.
- //
- auto g (
- make_exception_guard (
- [&pkg] ()
- {
- info << "while satisfying " << pkg.available_name_version ();
- }));
+ return no_change ();
+ }
- const shared_ptr<available_package>& ap (pkg.available);
- const shared_ptr<repository_fragment>& af (pkg.repository_fragment);
- const package_name& name (ap->id.name);
+ // If there are no unsatisfiable versions then the package is not present
+ // (or is not available in source) in its dependents' repositories.
+ //
+ if (unsatisfiable.empty ())
+ {
+ diag_record dr (fail);
- for (const dependency_alternatives_ex& da: ap->dependencies)
+ if (patch)
{
- if (da.conditional) // @@ TODO
- fail << "conditional dependencies are not yet supported";
+ // Otherwise, we should have bailed out earlier returning "no change"
+ // (see above).
+ //
+ assert (dsp != nullptr && (dsp->system () || deorphan));
- if (da.size () != 1) // @@ TODO
- fail << "multiple dependency alternatives not yet supported";
+ // Patch (as any upgrade) of a system package is always explicit, so
+ // we always fail and never treat the package as being up to date.
+ //
+ assert (explicitly);
- const dependency& dp (da.front ());
- const package_name& dn (dp.name);
+ fail << "patch version for " << *sp << db << " is not available "
+ << "from its dependents' repositories";
+ }
+ else if (!stub)
+ fail << package_string (nm, dsys ? nullopt : dvc) << ddb
+ << " is not available from its dependents' repositories";
+ else // The only available package is a stub.
+ {
+ // Otherwise, we should have bailed out earlier, returning "no change"
+ // rather then setting the stub flag to true (see above).
+ //
+ assert (!dvc && !dsys && dsp != nullptr && (dsp->system () || deorphan));
- if (da.buildtime)
- {
- // Handle special names.
- //
- if (dn == "build2")
- {
- if (dp.constraint)
- satisfy_build2 (options, name, dp);
+ fail << package_string (nm, dvc) << ddb << " is not available in "
+ << "source from its dependents' repositories";
+ }
+ }
- continue;
- }
- else if (dn == "bpkg")
- {
- if (dp.constraint)
- satisfy_bpkg (options, name, dp);
+ // Issue the diagnostics and fail.
+ //
+ diag_record dr (fail);
+ dr << "package " << nm << ddb << " doesn't satisfy its dependents";
- continue;
- }
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-configure.
- }
+ // Print the list of unsatisfiable versions together with dependents they
+ // don't satisfy: up to three latest versions with no more than five
+ // dependents each.
+ //
+ size_t nv (0);
+ for (const auto& u: unsatisfiable)
+ {
+ dr << info << package_string (nm, u.first) << " doesn't satisfy";
- bool system (false);
- bool dep_optional (false);
+ const sp_set& ps (u.second);
- // If the user specified the desired dependency version constraint,
- // then we will use it to overwrite the constraint imposed by the
- // dependent package, checking that it is still satisfied.
- //
- // Note that we can't just rely on the execution plan refinement that
- // will pick up the proper dependency version at the end of the day.
- // We may just not get to the plan execution simulation, failing due
- // to inability for dependency versions collected by two dependents to
- // satisfy each other constraints (for an example see the
- // pkg-build/dependency/apply-constraints/resolve-conflict{1,2}
- // tests).
-
- // Points to the desired dependency version constraint, if specified,
- // and is NULL otherwise. Can be used as boolean flag.
+ size_t i (0), n (ps.size ());
+ for (auto p (ps.begin ()); i != n; ++p)
+ {
+ // It would probably be nice to also print the unsatisfied constraint
+ // here, but let's keep it simple for now.
//
- const version_constraint* dep_constr (nullptr);
-
- auto i (map_.find (dn));
- if (i != map_.end ())
- {
- const build_package& bp (i->second.package);
-
- dep_optional = !bp.action; // Is pre-entered.
+ dr << (i == 0 ? " " : ", ") << *p->package << p->db;
- if (dep_optional &&
- //
- // The version constraint is specified,
- //
- bp.hold_version && *bp.hold_version)
- {
- assert (bp.constraints.size () == 1);
+ if (++i == 5 && n != 6) // Printing 'and 1 more' looks stupid.
+ break;
+ }
- const build_package::constraint_type& c (bp.constraints[0]);
+ if (i != n)
+ dr << " and " << n - i << " more";
- dep_constr = &c.value;
- system = bp.system;
+ if (++nv == 3 && unsatisfiable.size () != 4)
+ break;
+ }
- // If the user-specified dependency constraint is the wildcard
- // version, then it satisfies any dependency constraint.
- //
- if (!wildcard (*dep_constr) &&
- !satisfies (*dep_constr, dp.constraint))
- fail << "unable to satisfy constraints on package " << dn <<
- info << name << " depends on (" << dn << " "
- << *dp.constraint << ")" <<
- info << c.dependent << " depends on (" << dn << " "
- << c.value << ")" <<
- info << "specify " << dn << " version to satisfy " << name
- << " constraint";
- }
- }
+ if (nv != unsatisfiable.size ())
+ dr << info << "and " << unsatisfiable.size () - nv << " more";
- const dependency& d (!dep_constr
- ? dp
- : dependency {dn, *dep_constr});
+ dr << endf;
+ }
- // First see if this package is already selected. If we already have
- // it in the configuraion and it satisfies our dependency version
- // constraint, then we don't want to be forcing its upgrade (or,
- // worse, downgrade).
- //
- shared_ptr<selected_package> dsp (db.find<selected_package> (dn));
+ // List of dependent packages whose immediate/recursive dependencies must be
+ // upgraded and/or deorphaned (specified with -i/-r on the command line).
+ //
+ struct recursive_package
+ {
+ database& db;
+ package_name name;
- pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>> rp;
+ // Recursive/immediate upgrade/patch. Note the upgrade member is only
+ // meaningful if recursive is present.
+ //
+ optional<bool> recursive; // true -- recursive, false -- immediate.
+ bool upgrade; // true -- upgrade, false -- patch.
- shared_ptr<available_package>& dap (rp.first);
+ // Recursive/immediate deorphaning.
+ //
+ optional<bool> deorphan; // true -- recursive, false -- immediate.
+ };
+ using recursive_packages = vector<recursive_package>;
- bool force (false);
+ // Recursively check if immediate dependencies of this dependent must be
+ // upgraded or patched and/or deorphaned.
+ //
+ // Cache the results of this function calls to avoid multiple traversals of
+ // the same dependency graphs.
+ //
+ struct upgrade_dependencies_key
+ {
+ package_key dependent;
+ bool recursion;
- if (dsp != nullptr)
- {
- if (dsp->state == package_state::broken)
- fail << "unable to build broken package " << dn <<
- info << "use 'pkg-purge --force' to remove";
+ bool
+ operator< (const upgrade_dependencies_key& v) const
+ {
+ if (recursion != v.recursion)
+ return recursion < v.recursion;
- // If the constraint is imposed by the user we also need to make sure
- // that the system flags are the same.
- //
- if (satisfies (dsp->version, d.constraint) &&
- (!dep_constr || dsp->system () == system))
- {
- system = dsp->system ();
+ return dependent < v.dependent;
+ }
+ };
- // First try to find an available package for this exact version.
- // In particular, this handles the case where a package moves from
- // one repository to another (e.g., from testing to stable). For a
- // system package we pick the latest one (its exact version
- // doesn't really matter).
- //
- shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
-
- rp = system
- ? find_available_one (db, dn, nullopt, root)
- : find_available_one (db,
- dn,
- version_constraint (dsp->version),
- root);
-
- // A stub satisfies any version constraint so we weed them out
- // (returning stub as an available package feels wrong).
- //
- if (dap == nullptr || dap->stub ())
- rp = make_available (options, cd, db, dsp);
- }
- else
- // Remember that we may be forcing up/downgrade; we will deal with
- // it below.
- //
- force = true;
- }
+ struct upgrade_deorphan
+ {
+ optional<bool> upgrade; // true -- upgrade, false -- patch.
+ bool deorphan;
+ };
- // If we didn't get the available package corresponding to the
- // selected package, look for any that satisfies the constraint.
- //
- if (dap == nullptr)
- {
- // And if we have no repository fragment to look in, then that means
- // the package is an orphan (we delay this check until we actually
- // need the repository fragment to allow orphans without
- // prerequisites).
- //
- if (af == nullptr)
- fail << "package " << pkg.available_name_version ()
- << " is orphaned" <<
- info << "explicitly upgrade it to a new version";
-
- // We look for prerequisites only in the repositories of this
- // package (and not in all the repositories of this configuration).
- // At first this might look strange, but it also kind of makes
- // sense: we only use repositories "approved" for this package
- // version. Consider this scenario as an example: hello/1.0.0 and
- // libhello/1.0.0 in stable and libhello/2.0.0 in testing. As a
- // prerequisite of hello, which version should libhello resolve to?
- // While one can probably argue either way, resolving it to 1.0.0 is
- // the conservative choice and the user can always override it by
- // explicitly building libhello.
- //
- // Note though, that if this is a test package, then its special
- // test dependencies (main packages that refer to it) should be
- // searched upstream through the complement repositories
- // recursively, since the test packages may only belong to the main
- // package's repository and its complements.
- //
- // @@ Currently we don't implement the reverse direction search for
- // the test dependencies, effectively only supporting the common
- // case where the main and test packages belong to the same
- // repository. Will need to fix this eventually.
- //
- // Note that this logic (naturally) does not apply if the package is
- // already selected by the user (see above).
- //
- // Also note that for the user-specified dependency version
- // constraint we rely on the satisfying package version be present
- // in repositories of the first dependent met. As a result, we may
- // fail too early if such package version doesn't belong to its
- // repositories, but belongs to the ones of some dependent that
- // we haven't met yet. Can we just search all repositories for an
- // available package of the appropriate version and just take it,
- // if present? We could, but then which repository should we pick?
- // The wrong choice can introduce some unwanted repositories and
- // package versions into play. So instead, we will postpone
- // collecting the problematic dependent, expecting that some other
- // one will find the appropriate version in its repositories.
- //
- // For a system package we pick the latest version just to make sure
- // the package is recognized. An unrecognized package means the
- // broken/stale repository (see below).
- //
- rp = find_available_one (db,
- dn,
- !system ? d.constraint : nullopt,
- af);
+ using upgrade_dependencies_cache = map<upgrade_dependencies_key,
+ upgrade_deorphan>;
- if (dap == nullptr)
- {
- if (dep_constr && !system && postponed)
- {
- postponed->insert (&pkg);
- return;
- }
+ static upgrade_deorphan
+ upgrade_dependencies (database& db,
+ const package_name& nm,
+ const recursive_packages& rs,
+ upgrade_dependencies_cache& cache,
+ bool recursion = false)
+ {
+ // If the result of the upgrade_dependencies() call for these dependent
+ // and recursion flag value is cached, then return that. Otherwise, cache
+ // the calculated result prior to returning it to the caller.
+ //
+ upgrade_dependencies_key k {package_key (db, nm), recursion};
+ {
+ auto i (cache.find (k));
- diag_record dr (fail);
- dr << "unknown dependency " << dn;
+ if (i != cache.end ())
+ return i->second;
+ }
- // We need to be careful not to print the wildcard-based
- // constraint.
- //
- if (d.constraint && (!dep_constr || !wildcard (*dep_constr)))
- dr << ' ' << *d.constraint;
+ auto i (find_if (rs.begin (), rs.end (),
+ [&nm, &db] (const recursive_package& i) -> bool
+ {
+ return i.name == nm && i.db == db;
+ }));
- dr << " of package " << name;
+ upgrade_deorphan r {nullopt /* upgrade */, false /* deorphan */};
- if (!af->location.empty () && (!dep_constr || system))
- dr << info << "repository " << af->location << " appears to "
- << "be broken" <<
- info << "or the repository state could be stale" <<
- info << "run 'bpkg rep-fetch' to update";
- }
+ if (i != rs.end ())
+ {
+ if (i->recursive && *i->recursive >= recursion)
+ r.upgrade = i->upgrade;
- // If all that's available is a stub then we need to make sure the
- // package is present in the system repository and it's version
- // satisfies the constraint. If a source package is available but
- // there is a system package specified on the command line and it's
- // version satisfies the constraint then the system package should
- // be preferred. To recognize such a case we just need to check if
- // the authoritative system version is set and it satisfies the
- // constraint. If the corresponding system package is non-optional
- // it will be preferred anyway.
- //
- if (dap->stub ())
- {
- // Note that the constraint can safely be printed as it can't
- // be a wildcard (produced from the user-specified dependency
- // version constraint). If it were, then the system version
- // wouldn't be NULL and would satisfy itself.
- //
- if (dap->system_version () == nullptr)
- fail << "dependency " << d << " of package " << name << " is "
- << "not available in source" <<
- info << "specify ?sys:" << dn << " if it is available from "
- << "the system";
-
- if (!satisfies (*dap->system_version (), d.constraint))
- fail << "dependency " << d << " of package " << name << " is "
- << "not available in source" <<
- info << package_string (dn,
- *dap->system_version (),
- true /* system */)
- << " does not satisfy the constrains";
-
- system = true;
- }
- else
- {
- auto p (dap->system_version_authoritative ());
+ if (i->deorphan && *i->deorphan >= recursion)
+ r.deorphan = true;
- if (p.first != nullptr &&
- p.second && // Authoritative.
- satisfies (*p.first, d.constraint))
- system = true;
- }
- }
+ // If we both upgrade and deorphan, then we can bail out since the value
+ // may not change any further (upgrade wins over patch and deorphaning
+ // can't be canceled).
+ //
+ if (r.upgrade && *r.upgrade && r.deorphan)
+ {
+ cache[move (k)] = r;
+ return r;
+ }
+ }
- build_package bp {
- build_package::build,
- dsp,
- dap,
- rp.second,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {name}, // Required by (dependent).
- 0}; // Adjustments.
-
- // Add our constraint, if we have one.
- //
- // Note that we always add the constraint implied by the dependent. The
- // user-implied constraint, if present, will be added when merging from
- // the pre-entered entry. So we will have both constraints for
- // completeness.
- //
- if (dp.constraint)
- bp.constraints.emplace_back (name.string (), *dp.constraint);
-
- // Now collect this prerequisite. If it was actually collected
- // (i.e., it wasn't already there) and we are forcing a downgrade or
- // upgrade, then refuse for a held version, warn for a held package,
- // and print the info message otherwise, unless the verbosity level is
- // less than two.
- //
- // Note though that while the prerequisite was collected it could have
- // happen because it is an optional package and so not being
- // pre-collected earlier. Meanwhile the package was specified
- // explicitly and we shouldn't consider that as a dependency-driven
- // up/down-grade enforcement.
- //
- // Here is an example of the situation we need to handle properly:
- //
- // repo: foo/2(->bar/2), bar/0+1
- // build sys:bar/1
- // build foo ?sys:bar/2
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (auto& pd: query_dependents_cache (ddb, nm, db))
+ {
+ // Note that we cannot end up with an infinite recursion for
+ // configured packages due to a dependency cycle (see order() for
+ // details).
//
- const build_package* p (
- collect_build (options, cd, db, move (bp), postponed));
+ upgrade_deorphan ud (
+ upgrade_dependencies (ddb, pd.name, rs, cache, true /* recursion */));
- if (p != nullptr && force && !dep_optional)
+ if (ud.upgrade || ud.deorphan)
{
- // Fail if the version is held. Otherwise, warn if the package is
- // held.
+ // Upgrade wins over patch.
//
- bool f (dsp->hold_version);
- bool w (!f && dsp->hold_package);
-
- if (f || w || verb >= 2)
- {
- const version& av (p->available_version ());
+ if (ud.upgrade && (!r.upgrade || *r.upgrade < *ud.upgrade))
+ r.upgrade = *ud.upgrade;
- bool u (av > dsp->version);
- bool c (d.constraint);
+ if (ud.deorphan)
+ r.deorphan = true;
- diag_record dr;
-
- (f ? dr << fail :
- w ? dr << warn :
- dr << info)
- << "package " << name << " dependency on "
- << (c ? "(" : "") << d << (c ? ")" : "") << " is forcing "
- << (u ? "up" : "down") << "grade of " << *dsp << " to ";
-
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (dsp->system ())
- dr << p->available_name_version ();
- else
- dr << av; // Can't be a system version so is never wildcard.
-
- if (dsp->hold_version)
- dr << info << "package version " << *dsp << " is held";
-
- if (f)
- dr << info << "explicitly request version "
- << (u ? "up" : "down") << "grade to continue";
+ // If we both upgrade and deorphan, then we can bail out (see above
+ // for details).
+ //
+ if (r.upgrade && *r.upgrade && r.deorphan)
+ {
+ cache[move (k)] = r;
+ return r;
}
}
}
}
- // Collect the package being dropped.
- //
- void
- collect_drop (shared_ptr<selected_package> sp)
- {
- const package_name& nm (sp->name);
-
- build_package p {
- build_package::drop,
- move (sp),
- nullptr,
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- false, // System package.
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {}, // Required by.
- 0}; // Adjustments.
-
- auto i (map_.find (nm));
-
- if (i != map_.end ())
- {
- build_package& bp (i->second.package);
+ cache[move (k)] = r;
+ return r;
+ }
- // Can't think of the scenario when this happens. We would start
- // collecting from scratch (see below).
- //
- assert (!bp.action || *bp.action != build_package::build);
+ // Evaluate a package (not necessarily dependency) and return a new desired
+ // version. If the result is absent (nullopt), then no changes to the
+ // package are necessary. Otherwise, the result is available_package to
+ // upgrade/downgrade to or replace with, as well as the repository fragment
+ // it must come from.
+ //
+ // If the system package cannot be upgraded to the source one, not being
+ // found in the dependents repositories, then return nullopt if
+ // ignore_unsatisfiable argument is true and fail otherwise (see the
+ // evaluate_dependency() function description for details).
+ //
+ static optional<evaluate_result>
+ evaluate_recursive (const common_options& o,
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ const recursive_packages& recs,
+ const existing_dependencies& existing_deps,
+ const deorphaned_dependencies& deorphaned_deps,
+ const build_packages& pkgs,
+ bool ignore_unsatisfiable,
+ upgrade_dependencies_cache& cache)
+ {
+ tracer trace ("evaluate_recursive");
- // Overwrite the existing (possibly pre-entered or adjustment) entry.
- //
- bp = move (p);
- }
- else
- map_.emplace (nm, data_type {end (), move (p)});
- }
+ assert (sp != nullptr);
- // Collect the package being unheld.
+ // Build a set of repository fragment the dependent packages come from.
+ // Also cache the dependents and the constraints they apply to this
+ // dependency.
//
- void
- collect_unhold (const shared_ptr<selected_package>& sp)
+ config_repo_fragments repo_frags;
+ dependent_constraints dpt_constrs;
+
+ // Only collect repository fragments (for best version selection) of
+ // (immediate) dependents that have a hit (direct or indirect) in recs.
+ // Note, however, that we collect constraints from all the dependents.
+ //
+ upgrade_deorphan ud {nullopt /* upgrade */, false /* deorphan */};
+
+ for (database& ddb: db.dependent_configs ())
{
- auto i (map_.find (sp->name));
+ for (auto& pd: query_dependents_cache (ddb, sp->name, db))
+ {
+ shared_ptr<selected_package> p (ddb.load<selected_package> (pd.name));
- // Currently, it must always be pre-entered.
- //
- assert (i != map_.end ());
+ dpt_constrs.emplace_back (ddb, p, move (pd.constraint));
- build_package& bp (i->second.package);
+ upgrade_deorphan u (upgrade_dependencies (ddb, pd.name, recs, cache));
- if (!bp.action) // Pre-entered.
- {
- build_package p {
- build_package::adjust,
- sp,
- nullptr,
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- false, // System package.
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {}, // Required by.
- build_package::adjust_unhold};
-
- p.merge (move (bp));
- bp = move (p);
+ if (u.upgrade || u.deorphan)
+ {
+ // Upgrade wins over patch.
+ //
+ if (u.upgrade && (!ud.upgrade || *ud.upgrade < *u.upgrade))
+ ud.upgrade = *u.upgrade;
+
+ if (u.deorphan)
+ ud.deorphan = true;
+ }
+ else
+ continue;
+
+ // While we already know that the dependency upgrade is required, we
+ // continue to iterate over dependents, collecting the repository
+ // fragments and the constraints.
+ //
+ add_dependent_repo_fragments (ddb, p, repo_frags);
}
- else
- bp.adjustments |= build_package::adjust_unhold;
}
- void
- collect_build_prerequisites (const common_options& o,
- const dir_path& cd,
- database& db,
- const package_name& name,
- postponed_packages& postponed)
+ if (!ud.upgrade && !ud.deorphan)
{
- auto mi (map_.find (name));
- assert (mi != map_.end ());
- collect_build_prerequisites (o, cd, db, mi->second.package, &postponed);
+ l5 ([&]{trace << *sp << db << ": no hit";});
+ return nullopt;
}
- void
- collect_build_postponed (const common_options& o,
- const dir_path& cd,
- database& db,
- postponed_packages& pkgs)
- {
- // Try collecting postponed packages for as long as we are making
- // progress.
- //
- for (bool prog (true); !pkgs.empty (); )
- {
- postponed_packages npkgs;
-
- for (const build_package* p: pkgs)
- collect_build_prerequisites (o, cd, db, *p, prog ? &npkgs : nullptr);
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ find_existing (db, sp->name, nullopt /* version_constraint */));
- assert (prog); // collect_build_prerequisites() should have failed.
- prog = (npkgs != pkgs);
- pkgs.swap (npkgs);
- }
- }
+ optional<evaluate_result> r (
+ evaluate_dependency (o,
+ db,
+ sp,
+ nullopt /* desired */,
+ false /* desired_sys */,
+ rp.first != nullptr /* existing */,
+ db,
+ sp,
+ ud.upgrade,
+ ud.deorphan,
+ false /* explicitly */,
+ repo_frags,
+ dpt_constrs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable));
- // Order the previously-collected package with the specified name
- // returning its positions. Recursively order the package dependencies
- // being ordered failing if a dependency cycle is detected. If reorder is
- // true, then reorder this package to be considered as "early" as
- // possible.
+ // Translate the "no change" result into nullopt.
//
- iterator
- order (const package_name& name, bool reorder = true)
+ assert (!r || !r->unused);
+ return r && r->available == nullptr ? nullopt : r;
+ }
+
+ // Stack of the command line adjustments as per unsatisfied_dependents
+ // description.
+ //
+ struct cmdline_adjustment
+ {
+ enum class adjustment_type: uint8_t
{
- package_names chain;
- return order (name, chain, reorder);
- }
+ hold_existing, // Adjust constraint in existing build-to-hold spec.
+ dep_existing, // Adjust constraint in existing dependency spec.
+ hold_new, // Add new build-to-hold spec.
+ dep_new // Add new dependency spec.
+ };
- // If a configured package is being up/down-graded then that means
- // all its dependents could be affected and we have to reconfigure
- // them. This function examines every package that is already on
- // the list and collects and orders all its dependents. We also need
- // to make sure the dependents are ok with the up/downgrade.
- //
- // Should we reconfigure just the direct depends or also include
- // indirect, recursively? Consider this plauisible scenario as an
- // example: We are upgrading a package to a version that provides
- // an additional API. When its direct dependent gets reconfigured,
- // it notices this new API and exposes its own extra functionality
- // that is based on it. Now it would make sense to let its own
- // dependents (which would be our original package's indirect ones)
- // to also notice this.
+ adjustment_type type;
+ reference_wrapper<database> db;
+ package_name name;
+ bpkg::version version; // Replacement.
+
+ // Meaningful only for the *_new types.
//
- void
- collect_order_dependents (database& db)
- {
- // For each package on the list we want to insert all its dependents
- // before it so that they get configured after the package on which
- // they depend is configured (remember, our build order is reverse,
- // with the last package being built first). This applies to both
- // packages that are already on the list as well as the ones that
- // we add, recursively.
- //
- for (auto i (begin ()); i != end (); ++i)
- {
- const build_package& p (*i);
+ optional<bool> upgrade;
+ bool deorphan = false;
- // Prune if this is not a configured package being up/down-graded
- // or reconfigured.
- //
- assert (p.action);
+ // For the newly created or popped from the stack object the following
+ // three members contain the package version replacement information.
+ // Otherwise (pushed to the stack), they contain the original command line
+ // spec information.
+ //
+ shared_ptr<available_package> available; // NULL for dep_* types.
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment; // As above.
+ optional<version_constraint> constraint;
- // Dropped package may have no dependents.
- //
- if (*p.action != build_package::drop && p.reconfigure ())
- collect_order_dependents (db, i);
- }
- }
+ // Create object of the hold_existing type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ shared_ptr<available_package>&& a,
+ lazy_shared_ptr<bpkg::repository_fragment>&& f)
+ : type (adjustment_type::hold_existing),
+ db (d),
+ name (n),
+ version (a->version),
+ available (move (a)),
+ repository_fragment (move (f)),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the dep_existing type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ const bpkg::version& v)
+ : type (adjustment_type::dep_existing),
+ db (d),
+ name (n),
+ version (v),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the hold_new type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ shared_ptr<available_package>&& a,
+ lazy_shared_ptr<bpkg::repository_fragment>&& f,
+ optional<bool> u,
+ bool o)
+ : type (adjustment_type::hold_new),
+ db (d),
+ name (n),
+ version (a->version),
+ upgrade (u),
+ deorphan (o),
+ available (move (a)),
+ repository_fragment (move (f)),
+ constraint (version_constraint (version)) {}
+
+ // Create object of the dep_new type.
+ //
+ cmdline_adjustment (database& d,
+ const package_name& n,
+ const bpkg::version& v,
+ optional<bool> u,
+ bool o)
+ : type (adjustment_type::dep_new),
+ db (d),
+ name (n),
+ version (v),
+ upgrade (u),
+ deorphan (o),
+ constraint (version_constraint (version)) {}
+ };
+ class cmdline_adjustments
+ {
+ public:
+ cmdline_adjustments (vector<build_package>& hps, dependency_packages& dps)
+ : hold_pkgs_ (hps),
+ dep_pkgs_ (dps) {}
+
+ // Apply the specified adjustment to the command line, push the adjustment
+ // to the stack, and record the resulting command line state as the SHA256
+ // checksum.
+ //
void
- collect_order_dependents (database& db, iterator pos)
+ push (cmdline_adjustment&& a)
{
- tracer trace ("collect_order_dependents");
-
- assert (pos != end ());
-
- build_package& p (*pos);
- const shared_ptr<selected_package>& sp (p.selected);
+ using type = cmdline_adjustment::adjustment_type;
- const package_name& n (sp->name);
-
- // See if we are up/downgrading this package. In particular, the
- // available package could be NULL meaning we are just adjusting.
+ // We always set the `== <version>` constraint in the resulting spec.
//
- int ud (p.available != nullptr
- ? sp->version.compare (p.available_version ())
- : 0);
+ assert (a.constraint);
- using query = query<package_dependent>;
+ database& db (a.db);
+ const package_name& nm (a.name);
+ package_version_key cmd_line (db.main_database (), "command line");
- for (auto& pd: db.query<package_dependent> (query::name == n))
+ switch (a.type)
{
- package_name& dn (pd.name);
- auto i (map_.find (dn));
+ case type::hold_existing:
+ {
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ()); // As per adjustment type.
- // First make sure the up/downgraded package still satisfies this
- // dependent.
- //
- bool check (ud != 0 && pd.constraint);
+ build_package& bp (*i);
+ swap (bp.available, a.available);
+ swap (bp.repository_fragment, a.repository_fragment);
- // There is one tricky aspect: the dependent could be in the process
- // of being up/downgraded as well. In this case all we need to do is
- // detect this situation and skip the test since all the (new)
- // contraints of this package have been satisfied in collect_build().
- //
- if (check && i != map_.end () && i->second.position != end ())
- {
- build_package& dp (i->second.package);
+ if (!bp.constraints.empty ())
+ {
+ swap (bp.constraints[0].value, *a.constraint);
+ }
+ else
+ {
+ bp.constraints.emplace_back (move (*a.constraint),
+ cmd_line.db,
+ cmd_line.name.string ());
+ a.constraint = nullopt;
+ }
- check = dp.available == nullptr ||
- (dp.selected->system () == dp.system &&
- dp.selected->version == dp.available_version ());
+ break;
}
-
- if (check)
+ case type::dep_existing:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ()); // As per adjustment type.
+ swap (i->constraint, a.constraint);
+ break;
+ }
+ case type::hold_new:
{
- const version& av (p.available_version ());
- const version_constraint& c (*pd.constraint);
+ // As per adjustment type.
+ //
+ assert (find_hold_pkg (a) == hold_pkgs_.end ());
- if (!satisfies (av, c))
- {
- diag_record dr (fail);
+ // Start the database transaction to perform the
+ // database::find<selected_package> call, unless we are already in
+ // the transaction.
+ //
+ transaction t (db, !transaction::has_current ());
- dr << "unable to " << (ud < 0 ? "up" : "down") << "grade "
- << "package " << *sp << " to ";
+ build_package bp {
+ build_package::build,
+ db,
+ db.find<selected_package> (nm),
+ move (a.available),
+ move (a.repository_fragment),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System.
+ false, // Keep output directory.
+ false, // Disfigure (from-scratch reconf).
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ a.upgrade,
+ a.deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ (a.deorphan
+ ? build_package::build_replace
+ : uint16_t (0))};
- // Print both (old and new) package names in full if the system
- // attribution changes.
- //
- if (p.system != sp->system ())
- dr << p.available_name_version ();
- else
- dr << av; // Can't be the wildcard otherwise would satisfy.
+ t.commit ();
- dr << info << "because package " << dn << " depends on (" << n
- << " " << c << ")";
+ bp.constraints.emplace_back (move (*a.constraint),
+ cmd_line.db,
+ cmd_line.name.string ());
- string rb;
- if (!p.user_selection ())
- {
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
- }
+ a.constraint = nullopt;
- if (!rb.empty ())
- dr << info << "package " << p.available_name_version ()
- << " required by" << rb;
+ hold_pkgs_.push_back (move (bp));
+ break;
+ }
+ case type::dep_new:
+ {
+ // As per adjustment type.
+ //
+ assert (find_dep_pkg (a) == dep_pkgs_.end ());
- dr << info << "explicitly request up/downgrade of package " << dn;
+ // Start the database transaction to perform the
+ // database::find<selected_package> call, unless we are already in
+ // the transaction.
+ //
+ transaction t (db, !transaction::has_current ());
+
+ dep_pkgs_.push_back (
+ dependency_package {&db,
+ nm,
+ move (*a.constraint),
+ false /* hold_version */,
+ db.find<selected_package> (nm),
+ false /* system */,
+ false /* existing */,
+ a.upgrade,
+ a.deorphan,
+ false /* keep_out */,
+ false /* disfigure */,
+ nullopt /* checkout_root */,
+ false /* checkout_purge */,
+ strings () /* config_vars */,
+ nullptr /* system_status */});
- dr << info << "or explicitly specify package " << n << " version "
- << "to manually satisfy these constraints";
- }
+ t.commit ();
- // Add this contraint to the list for completeness.
- //
- p.constraints.emplace_back (dn.string (), c);
+ a.constraint = nullopt;
+ break;
}
+ }
- auto adjustment = [&dn, &n, &db] () -> build_package
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (dn));
- bool system (dsp->system ()); // Save flag before the move(dsp) call.
+ packages_.insert (package_version_key (db, nm, a.version));
+ adjustments_.push_back (move (a));
+ former_states_.insert (state ());
+ }
- return build_package {
- build_package::adjust,
- move (dsp),
- nullptr, // No available package/repository fragment.
- nullptr,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- system,
- false, // Keep output directory.
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {n}, // Required by (dependency).
- build_package::adjust_reconfigure};
- };
+ // Roll back the latest (default) or first command line adjustment, pop it
+ // from the stack, and return the popped adjustment. Assume that the stack
+ // is not empty.
+ //
+ // Note that the returned object can be pushed to the stack again.
+ //
+ cmdline_adjustment
+ pop (bool first = false)
+ {
+ using type = cmdline_adjustment::adjustment_type;
- // We can have three cases here: the package is already on the
- // list, the package is in the map (but not on the list) and it
- // is in neither.
- //
- // If the existing entry is a drop, then we skip it. If it is
- // pre-entered, is an adjustment, or is a build that is not supposed
- // to be built (not in the list), then we merge it into the new
- // adjustment entry. Otherwise (is a build in the list), we just add
- // the reconfigure adjustment flag to it.
- //
- if (i != map_.end ())
+ assert (!empty ());
+
+ // Pop the adjustment.
+ //
+ cmdline_adjustment a (move (!first
+ ? adjustments_.back ()
+ : adjustments_.front ()));
+ if (!first)
+ adjustments_.pop_back ();
+ else
+ adjustments_.erase (adjustments_.begin ());
+
+ packages_.erase (package_version_key (a.db, a.name, a.version));
+
+ // Roll back the adjustment.
+ //
+ switch (a.type)
+ {
+ case type::hold_existing:
{
- build_package& dp (i->second.package);
- iterator& dpos (i->second.position);
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- if (!dp.action || // Pre-entered.
- *dp.action != build_package::build || // Non-build.
- dpos == end ()) // Build not in the list.
- {
- // Skip the droped package.
- //
- if (dp.action && *dp.action == build_package::drop)
- continue;
+ build_package& bp (*i);
+ swap (bp.available, a.available);
+ swap (bp.repository_fragment, a.repository_fragment);
- build_package bp (adjustment ());
- bp.merge (move (dp));
- dp = move (bp);
- }
- else // Build in the list.
- dp.adjustments |= build_package::adjust_reconfigure;
-
- // It may happen that the dependent is already in the list but is
- // not properly ordered against its dependencies that get into the
- // list via another dependency path. Thus, we check if the dependent
- // is to the right of its dependency and, if that's the case,
- // reinsert it in front of the dependency.
+ // Must contain the replacement version.
//
- if (dpos != end ())
+ assert (!bp.constraints.empty ());
+
+ version_constraint& c (bp.constraints[0].value);
+
+ if (a.constraint) // Original spec contains a version constraint?
{
- for (auto i (pos); i != end (); ++i)
- {
- if (i == dpos)
- {
- erase (dpos);
- dpos = insert (pos, dp);
- break;
- }
- }
+ swap (c, *a.constraint);
}
else
- dpos = insert (pos, dp);
+ {
+ a.constraint = move (c);
+ bp.constraints.clear ();
+ }
+
+ break;
}
- else
+ case type::dep_existing:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
+ swap (i->constraint, a.constraint);
+ break;
+ }
+ case type::hold_new:
{
- i = map_.emplace (
- move (dn), data_type {end (), adjustment ()}).first;
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- i->second.position = insert (pos, i->second.package);
+ build_package& bp (*i);
+ a.available = move (bp.available);
+ a.repository_fragment = move (bp.repository_fragment);
+
+ // Must contain the replacement version.
+ //
+ assert (!bp.constraints.empty ());
+
+ a.constraint = move (bp.constraints[0].value);
+
+ hold_pkgs_.erase (i);
+ break;
}
+ case type::dep_new:
+ {
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
- // Recursively collect our own dependents inserting them before us.
- //
- // Note that we cannot end up with an infinite recursion for
- // configured packages due to a dependency cycle (see order() for
- // details).
- //
- collect_order_dependents (db, i->second.position);
+ a.constraint = move (i->constraint);
+
+ dep_pkgs_.erase (i);
+ break;
+ }
}
- }
- void
- clear ()
- {
- build_package_list::clear ();
- map_.clear ();
+ return a;
}
- void
- clear_order ()
+ // Return the specified adjustment's string representation in the
+ // following form:
+ //
+ // hold_existing: '<pkg>[ <constraint>][ <database>]' -> '<pkg> <constraint>'
+ // dep_existing: '?<pkg>[ <constraint>][ <database>]' -> '?<pkg> <constraint>'
+ // hold_new: '<pkg> <constraint>[ <database>]'
+ // dep_new: '?<pkg> <constraint>[ <database>]'
+ //
+ // Note: the adjustment is assumed to be newly created or be popped from
+ // the stack.
+ //
+ string
+ to_string (const cmdline_adjustment& a) const
{
- build_package_list::clear ();
+ using type = cmdline_adjustment::adjustment_type;
- for (auto& p: map_)
- p.second.position = end ();
- }
+ assert (a.constraint); // Since not pushed.
- private:
- using package_names = small_vector<reference_wrapper<const package_name>,
- 16>;
+ const string& s (a.db.get ().string);
- iterator
- order (const package_name& name, package_names& chain, bool reorder)
- {
- // Every package that we order should have already been collected.
- //
- auto mi (map_.find (name));
- assert (mi != map_.end ());
+ switch (a.type)
+ {
+ case type::hold_existing:
+ {
+ string r ("'" + a.name.string ());
- build_package& p (mi->second.package);
+ auto i (find_hold_pkg (a));
+ assert (i != hold_pkgs_.end ());
- assert (p.action); // Can't order just a pre-entered package.
+ const build_package& bp (*i);
+ if (!bp.constraints.empty ())
+ r += ' ' + bp.constraints[0].value.string ();
- // Make sure there is no dependency cycle.
- //
- {
- auto i (find (chain.begin (), chain.end (), name));
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "' -> '" + a.name.string () + ' ' + a.constraint->string () +
+ "'";
- if (i != chain.end ())
+ return r;
+ }
+ case type::dep_existing:
{
- diag_record dr (fail);
- dr << "dependency cycle detected involving package " << name;
+ string r ("'?" + a.name.string ());
- auto nv = [this] (const package_name& name)
- {
- auto mi (map_.find (name));
- assert (mi != map_.end ());
+ auto i (find_dep_pkg (a));
+ assert (i != dep_pkgs_.end ());
- build_package& p (mi->second.package);
+ if (i->constraint)
+ r += ' ' + i->constraint->string ();
- assert (p.action); // See above.
+ if (!s.empty ())
+ r += ' ' + s;
- // We cannot end up with a dependency cycle for actions other than
- // build since these packages are configured and we would fail on
- // a previous run while building them.
- //
- assert (p.available != nullptr);
+ r += "' -> '?" + a.name.string () + ' ' + a.constraint->string () +
+ "'";
- return p.available_name_version ();
- };
+ return r;
+ }
+ case type::hold_new:
+ {
+ assert (find_hold_pkg (a) == hold_pkgs_.end ());
- for (chain.push_back (name); i != chain.end () - 1; ++i)
- dr << info << nv (*i) << " depends on " << nv (*(i + 1));
+ string r ("'" + a.name.string () + ' ' + a.constraint->string ());
+
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "'";
+ return r;
}
- }
+ case type::dep_new:
+ {
+ assert (find_dep_pkg (a) == dep_pkgs_.end ());
- // If this package is already in the list, then that would also
- // mean all its prerequisites are in the list and we can just
- // return its position. Unless we want it reordered.
- //
- iterator& pos (mi->second.position);
- if (pos != end ())
- {
- if (reorder)
- erase (pos);
- else
- return pos;
+ string r ("'?" + a.name.string () + ' ' + a.constraint->string ());
+
+ if (!s.empty ())
+ r += ' ' + s;
+
+ r += "'";
+ return r;
+ }
}
- // Order all the prerequisites of this package and compute the
- // position of its "earliest" prerequisite -- this is where it
- // will be inserted.
- //
- const shared_ptr<selected_package>& sp (p.selected);
- const shared_ptr<available_package>& ap (p.available);
+ assert (false); // Can't be here.
+ return "";
+ }
- bool build (*p.action == build_package::build);
+ // Return true, if there are no adjustments in the stack.
+ //
+ bool
+ empty () const
+ {
+ return adjustments_.empty ();
+ }
- // Package build must always have the available package associated.
- //
- assert (!build || ap != nullptr);
+ // Return true, if push() has been called at least once.
+ //
+ bool
+ tried () const
+ {
+ return !former_states_.empty ();
+ }
- // Unless this package needs something to be before it, add it to
- // the end of the list.
- //
- iterator i (end ());
+ // Return the number of adjustments in the stack.
+ //
+ size_t
+ size () const
+ {
+ return adjustments_.size ();
+ }
- // Figure out if j is before i, in which case set i to j. The goal
- // here is to find the position of our "earliest" prerequisite.
- //
- auto update = [this, &i] (iterator j)
- {
- for (iterator k (j); i != j && k != end ();)
- if (++k == i)
- i = j;
- };
+ // Return true if replacing a package build with the specified version
+ // will result in a command line which has already been (unsuccessfully)
+ // tried as a starting point for the package builds re-collection.
+ //
+ bool
+ tried_earlier (database& db, const package_name& n, const version& v) const
+ {
+ if (former_states_.empty ())
+ return false;
- // Similar to collect_build(), we can prune if the package is already
- // configured, right? While in collect_build() we didn't need to add
- // prerequisites of such a package, it doesn't mean that they actually
- // never ended up in the map via another dependency path. For example,
- // some can be a part of the initial selection. And in that case we must
- // order things properly.
+ // Similar to the state() function, calculate the checksum over the
+ // packages set, but also consider the specified package version as if
+ // it were present in the set.
//
- // Also, if the package we are ordering is not a system one and needs to
- // be disfigured during the plan execution, then we must order its
- // (current) dependencies that also need to be disfigured.
+ // Note that the specified package version may not be in the set, since
+ // we shouldn't be trying to replace with the package version which is
+ // already in the command line.
//
- bool src_conf (sp != nullptr &&
- sp->state == package_state::configured &&
- sp->substate != package_substate::system);
+ sha256 cs;
- auto disfigure = [] (const build_package& p)
+ auto lt = [&db, &n, &v] (const package_version_key& pvk)
{
- return p.action && (*p.action == build_package::drop ||
- p.reconfigure ());
- };
+ if (int r = n.compare (pvk.name))
+ return r < 0;
- bool order_disfigured (src_conf && disfigure (p));
+ if (int r = v.compare (*pvk.version))
+ return r < 0;
- chain.push_back (name);
+ return db < pvk.db;
+ };
- // Order the build dependencies.
- //
- if (build && !p.system)
+ bool appended (false);
+ for (const package_version_key& p: packages_)
{
- // So here we are going to do things differently depending on
- // whether the package is already configured or not. If it is and
- // not as a system package, then that means we can use its
- // prerequisites list. Otherwise, we use the manifest data.
- //
- if (src_conf && sp->version == p.available_version ())
- {
- for (const auto& p: sp->prerequisites)
- {
- const package_name& name (p.first.object_id ());
+ assert (p.version); // Only the real packages can be here.
- // The prerequisites may not necessarily be in the map.
- //
- auto i (map_.find (name));
- if (i != map_.end () && i->second.package.action)
- update (order (name, chain, false /* reorder */));
- }
+ if (!appended && lt (p))
+ {
+ cs.append (db.config.string ());
+ cs.append (n.string ());
+ cs.append (v.string ());
- // We just ordered them among other prerequisites.
- //
- order_disfigured = false;
+ appended = true;
}
- else
- {
- // We are iterating in reverse so that when we iterate over
- // the dependency list (also in reverse), prerequisites will
- // be built in the order that is as close to the manifest as
- // possible.
- //
- for (const dependency_alternatives_ex& da:
- reverse_iterate (ap->dependencies))
- {
- assert (!da.conditional && da.size () == 1); // @@ TODO
- const dependency& d (da.front ());
- const package_name& dn (d.name);
- // Skip special names.
- //
- if (da.buildtime && (dn == "build2" || dn == "bpkg"))
- continue;
+ cs.append (p.db.get ().config.string ());
+ cs.append (p.name.string ());
+ cs.append (p.version->string ());
+ }
- update (order (d.name, chain, false /* reorder */));
- }
- }
+ if (!appended)
+ {
+ cs.append (db.config.string ());
+ cs.append (n.string ());
+ cs.append (v.string ());
}
- // Order the dependencies being disfigured.
+ return former_states_.find (cs.string ()) != former_states_.end ();
+ }
+
+ private:
+ // Return the SHA256 checksum of the current command line state.
+ //
+ string
+ state () const
+ {
+ // NOTE: remember to update tried_earlier() if changing anything here.
//
- if (order_disfigured)
+ sha256 cs;
+ for (const package_version_key& p: packages_)
{
- for (const auto& p: sp->prerequisites)
- {
- const package_name& name (p.first.object_id ());
+ assert (p.version); // Only the real packages can be here.
- // The prerequisites may not necessarily be in the map.
- //
- auto i (map_.find (name));
-
- if (i != map_.end () && disfigure (i->second.package))
- update (order (name, chain, false /* reorder */));
- }
+ cs.append (p.db.get ().config.string ());
+ cs.append (p.name.string ());
+ cs.append (p.version->string ());
}
- chain.pop_back ();
+ return cs.string ();
+ }
- return pos = insert (i, p);
+ // Find the command line package spec an adjustment applies to.
+ //
+ vector<build_package>::iterator
+ find_hold_pkg (const cmdline_adjustment& a) const
+ {
+ return find_if (hold_pkgs_.begin (), hold_pkgs_.end (),
+ [&a] (const build_package& p)
+ {
+ return p.name () == a.name && p.db == a.db;
+ });
}
- private:
- struct data_type
+ dependency_packages::iterator
+ find_dep_pkg (const cmdline_adjustment& a) const
{
- iterator position; // Note: can be end(), see collect_build().
- build_package package;
- };
+ return find_if (dep_pkgs_.begin (), dep_pkgs_.end (),
+ [&a] (const dependency_package& p)
+ {
+ return p.name == a.name &&
+ p.db != nullptr &&
+ *p.db == a.db;
+ });
+ }
- map<package_name, data_type> map_;
+ private:
+ vector<build_package>& hold_pkgs_;
+ dependency_packages& dep_pkgs_;
+
+ vector<cmdline_adjustment> adjustments_; // Adjustments stack.
+ set<package_version_key> packages_; // Replacements.
+ set<string> former_states_; // Command line seen states.
};
- // Return a patch version constraint for the selected package if it has a
- // standard version, otherwise, if requested, issue a warning and return
- // nullopt.
+ // Try to replace a collected package with a different available version,
+ // satisfactory for all its new and/or existing dependents. Return the
+ // command line adjustment if such a replacement is deduced and nullopt
+ // otherwise. In the latter case, also return the list of the being built
+ // dependents which are unsatisfied by some of the dependency available
+ // versions (unsatisfied_dpts argument).
//
- // Note that the function may also issue a warning and return nullopt if the
- // selected package minor version reached the limit (see
- // standard-version.cxx for details).
+ // Specifically, try to find the best available package version considering
+ // all the imposed constraints as per unsatisfied_dependents description. If
+ // succeed, return the command line adjustment reflecting the replacement.
//
- static optional<version_constraint>
- patch_constraint (const shared_ptr<selected_package>& sp, bool quiet = false)
+ // Notes:
+ //
+ // - Doesn't perform the actual adjustment of the command line.
+ //
+ // - Expected to be called after the execution plan is fully refined. That,
+ // in particular, means that all the existing dependents are also
+ // collected and thus the constraints they impose are already in their
+ // dependencies' constraints lists.
+ //
+ // - The specified package version may or may not be satisfactory for its
+ // new and existing dependents.
+ //
+ // - The replacement is denied in the following cases:
+ //
+ //   - If it turns out that the package has been specified on the command
+ // line (by the user or by us on some previous iteration) with an exact
+ // version constraint, then we cannot try any other version.
+ //
+ // - If the dependency is system, then it is either specified with the
+ //     wildcard version or its exact version has been specified by the user
+ //     or has been deduced by the system package manager. In the former
+ // case we actually won't be calling this function for this package
+ // since the wildcard version satisfies any constraint. Thus, an exact
+ // version has been specified/deduced for this dependency and so we
+ // cannot try any other version.
+ //
+ // - If the dependency is being built as an existing archive/directory,
+ // then its version is determined and so we cannot try any other
+ // version.
+ //
+ // - If the package is already configured with the version held and the
+ // user didn't specify this package on the command line and it is not
+ // requested to be upgraded, patched, and/or deorphaned, then we
+ // shouldn't be silently up/down-grading it.
+ //
+ optional<cmdline_adjustment>
+ try_replace_dependency (const common_options& o,
+ const build_package& p,
+ const build_packages& pkgs,
+ const vector<build_package>& hold_pkgs,
+ const dependency_packages& dep_pkgs,
+ const cmdline_adjustments& cmdline_adjs,
+ vector<package_key>& unsatisfied_dpts,
+ const char* what)
{
- const package_name& nm (sp->name);
- const version& sv (sp->version);
+ tracer trace ("try_replace_dependency");
- // Note that we don't pass allow_stub flag so the system wildcard version
- // will (naturally) not be patched.
- //
- string vs (sv.string ());
- optional<standard_version> v (parse_standard_version (vs));
+ assert (p.available != nullptr); // By definition.
- if (!v)
+ // Bail out for the system package build.
+ //
+ if (p.system)
{
- if (!quiet)
- warn << "unable to patch " << package_string (nm, sv) <<
- info << "package is not using semantic/standard version";
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is being configured as system";});
return nullopt;
}
- try
- {
- return version_constraint ("~" + vs);
- }
- // Note that the only possible reason for invalid_argument exception to
- // be thrown is that minor version reached the 99999 limit (see
- // standard-version.cxx for details).
+ // Bail out for an existing package archive/directory.
//
- catch (const invalid_argument&)
+ database& db (p.db);
+ const package_name& nm (p.name ());
+ const version& ver (p.available->version);
+
+ if (find_existing (db,
+ nm,
+ nullopt /* version_constraint */).first != nullptr)
{
- if (!quiet)
- warn << "unable to patch " << package_string (nm, sv) <<
- info << "minor version limit reached";
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied since "
+ << "it is being built as existing archive/directory";});
return nullopt;
}
- }
-
- // List of dependency packages (specified with ? on the command line).
- //
- struct dependency_package
- {
- package_name name;
- optional<version_constraint> constraint; // nullopt if unspecified.
- shared_ptr<selected_package> selected; // NULL if not present.
- bool system;
- bool patch; // Only for an empty version.
- bool keep_out;
- optional<dir_path> checkout_root;
- bool checkout_purge;
- strings config_vars; // Only if not system.
- };
- using dependency_packages = vector<dependency_package>;
- // Evaluate a dependency package and return a new desired version. If the
- // result is absent (nullopt), then there are no user expectations regarding
- // this dependency. If the result is a NULL available_package, then it is
- // either no longer used and can be dropped, or no changes to the dependency
- // are necessary. Otherwise, the result is available_package to
- // upgrade/downgrade to as well as the repository fragment it must come
- // from, and the system flag.
- //
- // If the package version that satisfies explicitly specified dependency
- // version constraint can not be found in the dependents repositories, then
- // return the "no changes are necessary" result if ignore_unsatisfiable
- // argument is true and fail otherwise. The common approach is to pass true
- // for this argument until the execution plan is finalized, assuming that
- // the problematic dependency might be dropped.
- //
- struct evaluate_result
- {
- shared_ptr<available_package> available;
- shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool unused;
- bool system; // Is meaningless if unused.
- };
+ // Find the package command line entry and stash the reference to its
+ // version constraint, if any. Bail out if the constraint is specified as
+ // an exact package version.
+ //
+ const build_package* hold_pkg (nullptr);
+ const dependency_package* dep_pkg (nullptr);
+ const version_constraint* constraint (nullptr);
- using package_dependents = vector<pair<shared_ptr<selected_package>,
- optional<version_constraint>>>;
+ for (const build_package& hp: hold_pkgs)
+ {
+ if (hp.name () == nm && hp.db == db)
+ {
+ hold_pkg = &hp;
- static optional<evaluate_result>
- evaluate_dependency (database&,
- const shared_ptr<selected_package>&,
- const optional<version_constraint>& desired,
- bool desired_sys,
- bool patch,
- bool explicitly,
- const set<shared_ptr<repository_fragment>>&,
- const package_dependents&,
- bool ignore_unsatisfiable);
+ if (!hp.constraints.empty ())
+ {
+ // Can only contain the user-specified constraint.
+ //
+ assert (hp.constraints.size () == 1);
- static optional<evaluate_result>
- evaluate_dependency (database& db,
- const dependency_packages& deps,
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_dependency");
+ const version_constraint& c (hp.constraints[0].value);
- assert (sp != nullptr && !sp->hold_package);
+ if (c.min_version == c.max_version)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is specified on command line as '"
+ << nm << ' ' << c << "'";});
- const package_name& nm (sp->name);
+ return nullopt;
+ }
+ else
+ constraint = &c;
+ }
- // Query the dependents and bail out if the dependency is unused.
- //
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == nm));
+ break;
+ }
+ }
- if (pds.empty ())
+ if (hold_pkg == nullptr)
{
- l5 ([&]{trace << *sp << ": unused";});
+ for (const dependency_package& dp: dep_pkgs)
+ {
+ if (dp.name == nm && dp.db != nullptr && *dp.db == db)
+ {
+ dep_pkg = &dp;
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- true /* unused */,
- false /* system */};
+ if (dp.constraint)
+ {
+ const version_constraint& c (*dp.constraint);
+
+ if (c.min_version == c.max_version)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied "
+ << "since it is specified on command line as '?"
+ << nm << ' ' << c << "'";});
+
+ return nullopt;
+ }
+ else
+ constraint = &c;
+ }
+
+ break;
+ }
+ }
}
- // If there are no user expectations regarding this dependency, then we
- // give no up/down-grade recommendation.
+ // Bail out if the selected package version is held and the package is not
+ // specified on the command line nor is being upgraded/deorphaned via its
+ // dependents recursively.
//
- auto i (find_if (
- deps.begin (), deps.end (),
- [&nm] (const dependency_package& i) {return i.name == nm;}));
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ if (sp != nullptr && sp->hold_version &&
+ hold_pkg == nullptr && dep_pkg == nullptr &&
+ !p.upgrade && !p.deorphan)
+ {
+ l5 ([&]{trace << "replacement of " << what << " version "
+ << p.available_name_version_db () << " is denied since "
+ << "it is already built to hold version and it is not "
+ << "specified on command line nor is being upgraded or "
+ << "deorphaned";});
- if (i == deps.end ())
return nullopt;
+ }
- // If the selected package matches the user expectations then no package
- // change is required.
- //
- const version& sv (sp->version);
- bool ssys (sp->system ());
+ transaction t (db);
- // The requested dependency version constraint and system flag.
+ // Collect the repository fragments to search the available packages in.
//
- const optional<version_constraint>& dvc (i->constraint); // May be nullopt.
- bool dsys (i->system);
+ config_repo_fragments rfs;
- if (ssys == dsys &&
- dvc &&
- (ssys ? sv == *dvc->min_version : satisfies (sv, dvc)))
+ // Add a repository fragment to the specified list, suppressing duplicates.
+ //
+ auto add = [] (shared_ptr<repository_fragment>&& rf,
+ vector<shared_ptr<repository_fragment>>& rfs)
{
- l5 ([&]{trace << *sp << ": unchanged";});
+ if (find (rfs.begin (), rfs.end (), rf) == rfs.end ())
+ rfs.push_back (move (rf));
+ };
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- false /* unused */,
- false /* system */};
+ // If the package is specified as build-to-hold on the command line, then
+ // collect the root repository fragment from its database. Otherwise,
+ // collect the repository fragments its dependent packages come from.
+ //
+ if (hold_pkg != nullptr)
+ {
+ add (db.find<repository_fragment> (empty_string), rfs[db]);
}
+ else
+ {
+ // Collect the repository fragments the new dependents come from.
+ //
+ if (p.required_by_dependents)
+ {
+ for (const package_version_key& dvk: p.required_by)
+ {
+ if (dvk.version) // Real package?
+ {
+ const build_package* d (pkgs.entered_build (dvk.db, dvk.name));
- // Build a set of repository fragments the dependent packages now come
- // from. Also cache the dependents and the constraints they apply to this
- // dependency.
- //
- set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ // Must be collected as a package build (see
+ // build_package::required_by for details).
+ //
+ assert (d != nullptr &&
+ d->action &&
+ *d->action == build_package::build &&
+ d->available != nullptr);
- for (auto& pd: pds)
- {
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
+ for (const package_location& pl: d->available->locations)
+ {
+ const lazy_shared_ptr<repository_fragment>& lrf (
+ pl.repository_fragment);
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ // Note that here we also handle dependents fetched/unpacked
+ // using the existing archive/directory adding the root
+ // repository fragments from their configurations.
+ //
+ if (!rep_masked_fragment (lrf))
+ add (lrf.load (), rfs[lrf.database ()]);
+ }
+ }
+ }
+ }
- if (dap != nullptr)
+ // Collect the repository fragments the existing dependents come from.
+ //
+ // Note that all the existing dependents are already in the map (since
+ // collect_dependents() has already been called) and are either
+ // reconfigure adjustments or non-collected recursively builds.
+ //
+ if (sp != nullptr)
{
- assert (!dap->locations.empty ());
+ for (database& ddb: db.dependent_configs ())
+ {
+ for (const auto& pd: query_dependents (ddb, nm, db))
+ {
+ const build_package* d (pkgs.entered_build (ddb, pd.name));
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
- }
+ // See collect_dependents() for details.
+ //
+ assert (d != nullptr && d->action);
+
+ if ((*d->action == build_package::adjust &&
+ (d->flags & build_package::adjust_reconfigure) != 0) ||
+ (*d->action == build_package::build && !d->dependencies))
+ {
+ shared_ptr<selected_package> p (
+ ddb.load<selected_package> (pd.name));
- dependents.emplace_back (move (dsp), move (pd.constraint));
+ add_dependent_repo_fragments (ddb, p, rfs);
+ }
+ }
+ }
+ }
}
- return evaluate_dependency (db,
- sp,
- dvc,
- dsys,
- i->patch,
- true /* explicitly */,
- repo_frags,
- dependents,
- ignore_unsatisfiable);
- }
+ // Query the dependency available packages from all the collected
+ // repository fragments and select the most appropriate one. Note that
+ // this code is inspired by the evaluate_dependency() function
+ // implementation, which documents the below logic in great detail.
+ //
+ optional<version_constraint> c (constraint != nullptr
+ ? *constraint
+ : optional<version_constraint> ());
- static optional<evaluate_result>
- evaluate_dependency (database& db,
- const shared_ptr<selected_package>& sp,
- const optional<version_constraint>& dvc,
- bool dsys,
- bool patch,
- bool explicitly,
- const set<shared_ptr<repository_fragment>>& rfs,
- const package_dependents& dependents,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_dependency");
+ if (!c && p.upgrade && !*p.upgrade)
+ {
+ assert (sp != nullptr); // See build_package::upgrade.
- const package_name& nm (sp->name);
- const version& sv (sp->version);
+ c = patch_constraint (sp);
- auto no_change = [] ()
- {
- return evaluate_result {nullptr /* available */,
- nullptr /* repository_fragment */,
- false /* unused */,
- false /* system */};
- };
+ assert (c); // See build_package::upgrade.
+ }
- // Build the list of available packages for the potential up/down-grade
- // to, in the version-descending order. If patching, then we constrain the
- // choice with the latest patch version and place no constraints if
- // upgrading. For a system package we also put no constraints just to make
- // sure that the package is recognized.
+ available_packages afs (find_available (nm, c, rfs));
+
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
+
+ available ra;
+
+ // Version to deorphan.
//
- optional<version_constraint> c;
+ const version* dov (p.deorphan ? &sp->version : nullptr);
- if (!dvc)
- {
- assert (!dsys); // The version can't be empty for the system package.
+ optional<version_constraint> dopc; // Patch constraint for the above.
+ optional<version_constraint> domc; // Minor constraint for the above.
- if (patch)
- {
- c = patch_constraint (sp, ignore_unsatisfiable);
+ bool orphan_best_match (p.deorphan && constraint == nullptr && !p.upgrade);
- if (!c)
- {
- l5 ([&]{trace << *sp << ": non-patchable";});
- return no_change ();
- }
- }
+ if (orphan_best_match)
+ {
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
+ //
+ version v (dov->epoch,
+ dov->upstream,
+ dov->release,
+ dov->revision,
+ 0 /* iteration */);
+
+ dopc = patch_constraint (nm, v, true /* quiet */);
+ domc = minor_constraint (nm, v, true /* quiet */);
}
- else if (!dsys)
- c = dvc;
- vector<pair<shared_ptr<available_package>,
- shared_ptr<repository_fragment>>> afs (
- find_available (db,
- nm,
- c,
- vector<shared_ptr<repository_fragment>> (rfs.begin (),
- rfs.end ())));
+ available deorphan_latest_iteration;
+ available deorphan_later_revision;
+ available deorphan_later_patch;
+ available deorphan_later_minor;
+ available deorphan_latest_available;
- // Go through up/down-grade candidates and pick the first one that
- // satisfies all the dependents. Collect (and sort) unsatisfied dependents
- // per the unsatisfiable version in case we need to print them.
+ // Return true if a version satisfies all the dependency constraints.
+ // Otherwise, save all the being built unsatisfied dependents into the
+ // resulting list, suppressing duplicates.
//
- struct compare_sp
+ auto satisfactory = [&p, &unsatisfied_dpts] (const version& v)
{
- bool
- operator() (const shared_ptr<selected_package>& x,
- const shared_ptr<selected_package>& y) const
+ bool r (true);
+
+ for (const auto& c: p.constraints)
{
- return x->name < y->name;
+ if (!satisfies (v, c.value))
+ {
+ r = false;
+
+ if (c.dependent.version && !c.selected_dependent)
+ {
+ package_key pk (c.dependent.db, c.dependent.name);
+
+ if (find (unsatisfied_dpts.begin (),
+ unsatisfied_dpts.end (),
+ pk) == unsatisfied_dpts.end ())
+ unsatisfied_dpts.push_back (move (pk));
+ }
+ }
}
+
+ return r;
};
- using sp_set = set<reference_wrapper<const shared_ptr<selected_package>>,
- compare_sp>;
+ for (available& af: afs)
+ {
+ shared_ptr<available_package>& ap (af.first);
- vector<pair<version, sp_set>> unsatisfiable;
+ if (ap->stub ())
+ continue;
- bool stub (false);
- bool ssys (sp->system ());
+ const version& av (ap->version);
- assert (!dsys || system_repository.find (nm) != nullptr);
+ // Skip if the available package version doesn't satisfy all the
+ // constraints (note: must be checked first since has a byproduct).
+ //
+ if (!satisfactory (av))
+ continue;
- for (auto& af: afs)
- {
- shared_ptr<available_package>& ap (af.first);
- const version& av (!dsys ? ap->version : *ap->system_version ());
+ // Don't offer to replace to the same version.
+ //
+ if (av == ver)
+ continue;
+
+ // Don't repeatedly offer the same adjustments for the same command
+ // line.
+ //
+ if (cmdline_adjs.tried_earlier (db, nm, av))
+ {
+ l5 ([&]{trace << "replacement " << package_version_key (db, nm, av)
+ << " tried earlier for same command line, skipping";});
+
+ continue;
+ }
// If we aim to upgrade to the latest version and it tends to be less
      // than the selected one, then what we currently have is the best that
- // we can get, and so we return the "no change" result.
- //
- // Note that we also handle a package stub here.
+ // we can get. Thus, we use the selected version as a replacement,
+ // unless it doesn't satisfy all the constraints or we are deorphaning.
//
- if (!dvc && av < sv)
+ if (constraint == nullptr && sp != nullptr)
{
- assert (!dsys); // Version can't be empty for the system package.
-
- // For the selected system package we still need to pick a source
- // package version to downgrade to.
- //
- if (!ssys)
+ const version& sv (sp->version);
+ if (av < sv && !sp->system () && !p.deorphan)
{
- l5 ([&]{trace << *sp << ": best";});
- return no_change ();
+ // Only consider the selected package if its version is satisfactory
+ // for its new dependents (note: must be checked first since has a
+ // byproduct), differs from the version being replaced, and was
+ // never used for the same command line (see above for details).
+ //
+ if (satisfactory (sv) && sv != ver)
+ {
+ if (!cmdline_adjs.tried_earlier (db, nm, sv))
+ {
+ ra = make_available_fragment (o, db, sp);
+ break;
+ }
+ else
+ l5 ([&]{trace << "selected package replacement "
+ << package_version_key (db, nm, sp->version)
+ << " tried earlier for same command line, "
+ << "skipping";});
+ }
}
+ }
- // We can not upgrade the (system) package to a stub version, so just
- // skip it.
- //
- if (ap->stub ())
+ if (orphan_best_match)
+ {
+ if (av == *dov)
{
- stub = true;
- continue;
+ ra = move (af);
+ break;
}
- }
- // Check if the version satisfies all the dependents and collect
- // unsatisfied ones.
- //
- bool satisfactory (true);
- sp_set unsatisfied_dependents;
+ if (deorphan_latest_iteration.first == nullptr &&
+ av.compare (*dov, false /* revision */, true /* iteration */) == 0)
+ deorphan_latest_iteration = af;
- for (const auto& dp: dependents)
- {
- if (!satisfies (av, dp.second))
- {
- satisfactory = false;
+ if (deorphan_later_revision.first == nullptr &&
+ av.compare (*dov, true /* revision */) == 0 &&
+ av.compare (*dov, false /* revision */, true /* iteration */) > 0)
+ deorphan_later_revision = af;
- // Continue to collect dependents of the unsatisfiable version if
- // we need to print them before failing.
- //
- if (ignore_unsatisfiable)
- break;
+ if (deorphan_later_patch.first == nullptr &&
+ dopc && satisfies (av, *dopc) &&
+ av.compare (*dov, true /* revision */) > 0) // Patch is greater?
+ deorphan_later_patch = af;
- unsatisfied_dependents.insert (dp.first);
- }
- }
+ if (deorphan_later_minor.first == nullptr &&
+ domc && satisfies (av, *domc) &&
+ av.compare (*dov, true /* revision */) > 0 &&
+ deorphan_later_patch.first == nullptr)
+ deorphan_later_minor = af;
- if (!satisfactory)
- {
- if (!ignore_unsatisfiable)
- unsatisfiable.emplace_back (av, move (unsatisfied_dependents));
+ if (deorphan_latest_available.first == nullptr)
+ deorphan_latest_available = move (af);
- // If the dependency is expected to be configured as system, then bail
- // out, as an available package version will always resolve to the
- // system one (see above).
- //
- if (dsys)
- break;
+ if (av.compare (*dov, false /* revision */, true /* iteration */) < 0)
+ {
+ assert (deorphan_latest_iteration.first != nullptr ||
+ deorphan_later_revision.first != nullptr ||
+ deorphan_later_patch.first != nullptr ||
+ deorphan_later_minor.first != nullptr ||
+ deorphan_latest_available.first != nullptr);
- continue;
+ break;
+ }
}
-
- // If the best satisfactory version and the desired system flag perfectly
- // match the ones of the selected package, then no package change is
- // required. Otherwise, recommend an up/down-grade.
- //
- if (av == sv && ssys == dsys)
+ else
{
- l5 ([&]{trace << *sp << ": unchanged";});
- return no_change ();
+ ra = move (af);
+ break;
}
-
- l5 ([&]{trace << *sp << ": update to "
- << package_string (nm, av, dsys);});
-
- return evaluate_result {
- move (ap), move (af.second), false /* unused */, dsys};
}
- // If we aim to upgrade to the latest version, then what we currently have
- // is the only thing that we can get, and so returning the "no change"
- // result, unless we need to upgrade a package configured as system.
- //
- if (!dvc && !ssys)
- {
- assert (!dsys); // Version cannot be empty for the system package.
+ shared_ptr<available_package>& rap (ra.first);
- l5 ([&]{trace << *sp << ": only";});
- return no_change ();
+ if (rap == nullptr && orphan_best_match)
+ {
+ if (deorphan_latest_iteration.first != nullptr)
+ ra = move (deorphan_latest_iteration);
+ else if (deorphan_later_revision.first != nullptr)
+ ra = move (deorphan_later_revision);
+ else if (deorphan_later_patch.first != nullptr)
+ ra = move (deorphan_later_patch);
+ else if (deorphan_later_minor.first != nullptr)
+ ra = move (deorphan_later_minor);
+ else if (deorphan_latest_available.first != nullptr)
+ ra = move (deorphan_latest_available);
}
- // If the version satisfying the desired dependency version constraint is
- // unavailable or unsatisfiable for some dependents then we fail, unless
- // requested not to do so. In the later case we return the "no change"
- // result.
+ t.commit ();
+
+ // Bail out if no appropriate replacement is found and return the
+ // command line adjustment object otherwise.
//
- if (ignore_unsatisfiable)
- {
- l5 ([&]{trace << package_string (nm, dvc, dsys)
- << (unsatisfiable.empty ()
- ? ": no source"
- : ": unsatisfiable");});
+ if (rap == nullptr)
+ return nullopt;
- return no_change ();
- }
+ optional<cmdline_adjustment> r;
- // If there are no unsatisfiable versions then the package is not present
- // (or is not available in source) in its dependents' repositories.
- //
- if (unsatisfiable.empty ())
- {
- diag_record dr (fail);
+ lazy_shared_ptr<repository_fragment>& raf (ra.second);
- if (!dvc && patch)
+ if (hold_pkg != nullptr || dep_pkg != nullptr) // Specified on command line?
+ {
+ if (hold_pkg != nullptr)
{
- assert (ssys); // Otherwise, we would bail out earlier (see above).
+ r = cmdline_adjustment (hold_pkg->db,
+ hold_pkg->name (),
+ move (rap),
+ move (raf));
- // Patch (as any upgrade) of a system package is always explicit, so
- // we always fail and never treat the package as being up to date.
- //
- assert (explicitly);
-
- fail << "patch version for " << *sp << " is not available "
- << "from its dependents' repositories";
+ if (constraint != nullptr)
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by overwriting constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
+ else
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by adding constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
}
- else if (!stub)
- fail << package_string (nm, dsys ? nullopt : dvc)
- << " is not available from its dependents' repositories";
- else // The only available package is a stub.
+ else // dep_pkg != nullptr
{
- // Note that we don't advise to "build" the package as a system one as
- // it is already as such (see above).
- //
- assert (!dvc && !dsys && ssys);
+ r = cmdline_adjustment (*dep_pkg->db, dep_pkg->name, rap->version);
- fail << package_string (nm, dvc) << " is not available in source "
- << "from its dependents' repositories";
+ if (constraint != nullptr)
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by overwriting constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
+ else
+ {
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with "
+ << r->version << " by adding constraint "
+ << cmdline_adjs.to_string (*r) << " on command line";});
+ }
}
}
-
- // Issue the diagnostics and fail.
- //
- diag_record dr (fail);
- dr << "package " << nm << " doesn't satisfy its dependents";
-
- // Print the list of unsatisfiable versions together with dependents they
- // don't satisfy: up to three latest versions with no more than five
- // dependents each.
- //
- size_t nv (0);
- for (const auto& u: unsatisfiable)
+ else // The package is not specified on the command line.
{
- dr << info << package_string (nm, u.first) << " doesn't satisfy";
+ // If the package is configured as system, then since it is not
+ // specified by the user (both hold_pkg and dep_pkg are NULL) we may
+ // only build it as system. Thus we wouldn't be here (see above).
+ //
+ assert (sp == nullptr || !sp->system ());
- size_t n (0);
- const sp_set& ps (u.second);
- for (const shared_ptr<selected_package>& p: ps)
+ // Similar to the collect lambda in collect_build_prerequisites(), issue
+ // the warning if we are forcing an up/down-grade.
+ //
+ if (sp != nullptr && (sp->hold_package || verb >= 2))
{
- dr << ' ' << *p;
+ const version& av (rap->version);
+ const version& sv (sp->version);
- if (++n == 5 && ps.size () != 6) // Printing 'and 1 more' looks stupid.
- break;
+ int ud (sv.compare (av));
+
+ if (ud != 0)
+ {
+ for (const auto& c: p.constraints)
+ {
+ if (c.dependent.version && !satisfies (sv, c.value))
+ {
+ warn << "package " << c.dependent << " dependency on ("
+ << nm << ' ' << c.value << ") is forcing "
+ << (ud < 0 ? "up" : "down") << "grade of " << *sp << db
+ << " to " << av;
+
+ break;
+ }
+ }
+ }
}
- if (n != ps.size ())
- dr << " and " << ps.size () - n << " more";
+ // For the selected built-to-hold package create the build-to-hold
+ // package spec and the dependency spec otherwise.
+ //
+ if (sp != nullptr && sp->hold_package)
+ {
+ r = cmdline_adjustment (db,
+ nm,
+ move (rap),
+ move (raf),
+ p.upgrade,
+ p.deorphan);
+
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with " << r->version
+ << " by adding package spec "
+ << cmdline_adjs.to_string (*r)
+ << " to command line";});
+ }
+ else
+ {
+ r = cmdline_adjustment (db, nm, rap->version, p.upgrade, p.deorphan);
- if (++nv == 3 && unsatisfiable.size () != 4)
- break;
+ l5 ([&]{trace << "replace " << what << " version "
+ << p.available_name_version () << " with " << r->version
+ << " by adding package spec "
+ << cmdline_adjs.to_string (*r)
+ << " to command line";});
+ }
}
- if (nv != unsatisfiable.size ())
- dr << info << "and " << unsatisfiable.size () - nv << " more";
-
- dr << endf;
+ return r;
}
- // List of dependent packages whose immediate/recursive dependencies must be
- // upgraded (specified with -i/-r on the command line).
+ // Try to replace some of the being built, potentially indirect, dependents
+ // of the specified dependency with a different available version,
+ // satisfactory for all its new and existing dependents (if any). Return the
+ // command line adjustment if such a replacement is deduced and nullopt
+ // otherwise. It is assumed that the dependency replacement has been
+ // (unsuccessfully) tried by using the try_replace_dependency() call and its
+ // resulting list of the dependents, unsatisfied by some of the dependency
+ // available versions, is also passed to the function call as the
+ // unsatisfied_dpts argument.
//
- struct recursive_package
- {
- package_name name;
- bool upgrade; // true -- upgrade, false -- patch.
- bool recursive; // true -- recursive, false -- immediate.
- };
- using recursive_packages = vector<recursive_package>;
-
- // Recursively check if immediate dependencies of this dependent must be
- // upgraded or patched. Return true if it must be upgraded, false if
- // patched, and nullopt otherwise.
+ // Specifically, try to replace the dependents in the following order by
+ // calling try_replace_dependency() for them:
//
- static optional<bool>
- upgrade_dependencies (database& db,
- const package_name& nm,
- const recursive_packages& recs,
- bool recursion = false)
+ // - Immediate dependents unsatisfied with the specified dependency. For the
+ // sake of tracing and documentation, we (naturally) call them unsatisfied
+ // dependents.
+ //
+ // - Immediate dependents satisfied with the dependency but applying the
+ // version constraint which has prevented us from picking a version which
+ // would be satisfactory to the unsatisfied dependents. Note that this
+ // information is only available for the being built unsatisfied
+ // dependents (added by collect_build() rather than collect_dependents()).
+ // We call them conflicting dependents.
+ //
+ // - Immediate dependents which apply constraint to this dependency,
+ // incompatible with constraints of some other dependents (both new and
+ // existing). We call them unsatisfiable dependents.
+ //
+ // - Immediate dependents from unsatisfied_dpts argument. We call them
+ // constraining dependents.
+ //
+ // - Dependents of all the above types of dependents, discovered by
+ // recursively calling try_replace_dependent() for them.
+ //
+ optional<cmdline_adjustment>
+ try_replace_dependent (const common_options& o,
+ const build_package& p, // Dependency.
+ const vector<unsatisfied_constraint>* ucs,
+ const build_packages& pkgs,
+ const cmdline_adjustments& cmdline_adjs,
+ const vector<package_key>& unsatisfied_dpts,
+ vector<build_package>& hold_pkgs,
+ dependency_packages& dep_pkgs,
+ set<const build_package*>& visited_dpts)
{
- auto i (find_if (recs.begin (), recs.end (),
- [&nm] (const recursive_package& i) -> bool
- {
- return i.name == nm;
- }));
+ tracer trace ("try_replace_dependent");
- optional<bool> r;
+ // Bail out if the dependent has already been visited and add it to the
+ // visited set otherwise.
+ //
+ if (!visited_dpts.insert (&p).second)
+ return nullopt;
- if (i != recs.end () && i->recursive >= recursion)
- {
- r = i->upgrade;
+ using constraint_type = build_package::constraint_type;
- if (*r) // Upgrade (vs patch)?
- return r;
- }
+ const shared_ptr<available_package>& ap (p.available);
+ assert (ap != nullptr); // By definition.
+
+ const version& av (ap->version);
+
+ // List of the dependents which we have (unsuccessfully) tried to replace
+ // together with the lists of the constraining dependents.
+ //
+ vector<pair<package_key, vector<package_key>>> dpts;
- for (const auto& pd: db.query<package_dependent> (
- query<package_dependent>::name == nm))
+ // Try to replace a dependent, unless we have already tried to replace it.
+ //
+ auto try_replace = [&o,
+ &p,
+ &pkgs,
+ &cmdline_adjs,
+ &hold_pkgs,
+ &dep_pkgs,
+ &visited_dpts,
+ &dpts,
+ &trace] (package_key dk, const char* what)
+ -> optional<cmdline_adjustment>
{
- // Note that we cannot end up with an infinite recursion for configured
- // packages due to a dependency cycle (see order() for details).
- //
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs, true))
+ if (find_if (dpts.begin (), dpts.end (),
+ [&dk] (const auto& v) {return v.first == dk;}) ==
+ dpts.end ())
{
- if (!r || *r < *u) // Upgrade wins patch.
+ const build_package* d (pkgs.entered_build (dk));
+
+ // Always come from the dependency's constraints member.
+ //
+ assert (d != nullptr);
+
+ // Skip the visited dependents since, by definition, we have already
+ // tried to replace them.
+ //
+ if (find (visited_dpts.begin (), visited_dpts.end (), d) ==
+ visited_dpts.end ())
{
- r = u;
+ l5 ([&]{trace << "try to replace " << what << ' '
+ << d->available_name_version_db () << " of dependency "
+ << p.available_name_version_db () << " with some "
+ << "other version";});
+
+ vector<package_key> uds;
+
+ if (optional<cmdline_adjustment> a = try_replace_dependency (
+ o,
+ *d,
+ pkgs,
+ hold_pkgs,
+ dep_pkgs,
+ cmdline_adjs,
+ uds,
+ what))
+ {
+ return a;
+ }
- if (*r) // Upgrade (vs patch)?
- return r;
+ dpts.emplace_back (move (dk), move (uds));
}
}
- }
- return r;
- }
+ return nullopt;
+ };
- // Evaluate a package (not necessarily dependency) and return a new desired
- // version. If the result is absent (nullopt), then no changes to the
- // package are necessary. Otherwise, the result is available_package to
- // upgrade/downgrade to as well as the repository fragment it must come
- // from.
- //
- // If the system package cannot be upgraded to the source one, not being
- // found in the dependents repositories, then return nullopt if
- // ignore_unsatisfiable argument is true and fail otherwise (see the
- // evaluate_dependency() function description for details).
- //
- static optional<evaluate_result>
- evaluate_recursive (database& db,
- const recursive_packages& recs,
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable)
- {
- tracer trace ("evaluate_recursive");
+ // Try to replace unsatisfied dependents.
+ //
+ for (const constraint_type& c: p.constraints)
+ {
+ const package_version_key& dvk (c.dependent);
- assert (sp != nullptr);
+ if (dvk.version && !c.selected_dependent && !satisfies (av, c.value))
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "unsatisfied dependent"))
+ {
+ return a;
+ }
+ }
+ }
- // Build a set of repository fragment the dependent packages come from.
- // Also cache the dependents and the constraints they apply to this
- // dependency.
+ // Try to replace conflicting dependents.
//
- set<shared_ptr<repository_fragment>> repo_frags;
- package_dependents dependents;
+ if (ucs != nullptr)
+ {
+ for (const unsatisfied_constraint& uc: *ucs)
+ {
+ const package_version_key& dvk (uc.constraint.dependent);
- auto pds (db.query<package_dependent> (
- query<package_dependent>::name == sp->name));
+ if (dvk.version)
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "conflicting dependent"))
+ {
+ return a;
+ }
+ }
+ }
+ }
- // Only collect repository fragments (for best version selection) of
- // (immediate) dependents that have a hit (direct or indirect) in recs.
- // Note, however, that we collect constraints from all the dependents.
+ // Try to replace unsatisfiable dependents.
//
- optional<bool> upgrade;
-
- for (const auto& pd: pds)
+ for (const constraint_type& c1: p.constraints)
{
- shared_ptr<selected_package> dsp (db.load<selected_package> (pd.name));
- dependents.emplace_back (dsp, move (pd.constraint));
+ const package_version_key& dvk (c1.dependent);
- if (optional<bool> u = upgrade_dependencies (db, pd.name, recs))
+ if (dvk.version && !c1.selected_dependent)
{
- if (!upgrade || *upgrade < *u) // Upgrade wins patch.
- upgrade = u;
- }
- else
- continue;
+ const version_constraint& v1 (c1.value);
- // While we already know that the dependency upgrade is required, we
- // continue to iterate over dependents, collecting the repository
- // fragments and the constraints.
- //
- shared_ptr<available_package> dap (
- db.find<available_package> (
- available_package_id (dsp->name, dsp->version)));
+ bool unsatisfiable (false);
+ for (const constraint_type& c2: p.constraints)
+ {
+ const version_constraint& v2 (c2.value);
- if (dap != nullptr)
- {
- assert (!dap->locations.empty ());
+ if (!satisfies (v1, v2) && !satisfies (v2, v1))
+ {
+ unsatisfiable = true;
+ break;
+ }
+ }
- for (const auto& pl: dap->locations)
- repo_frags.insert (pl.repository_fragment.load ());
+ if (unsatisfiable)
+ {
+ if (optional<cmdline_adjustment> a = try_replace (
+ package_key (dvk.db, dvk.name), "unsatisfiable dependent"))
+ {
+ return a;
+ }
+ }
}
}
- if (!upgrade)
+ // Try to replace constraining dependents.
+ //
+ for (const auto& dk: unsatisfied_dpts)
{
- l5 ([&]{trace << *sp << ": no hit";});
- return nullopt;
+ if (optional<cmdline_adjustment> a = try_replace (
+ dk, "constraining dependent"))
+ {
+ return a;
+ }
}
- // Recommends the highest possible version.
+ // Try to replace dependents of the above dependents, recursively.
//
- optional<evaluate_result> r (
- evaluate_dependency (db,
- sp,
- nullopt /* desired */,
- false /*desired_sys */,
- !*upgrade /* patch */,
- false /* explicitly */,
- repo_frags,
- dependents,
- ignore_unsatisfiable));
+ for (const auto& dep: dpts)
+ {
+ const build_package* d (pkgs.entered_build (dep.first));
+
+ assert (d != nullptr);
+
+ if (optional<cmdline_adjustment> a = try_replace_dependent (
+ o,
+ *d,
+ nullptr /* unsatisfied_constraints */,
+ pkgs,
+ cmdline_adjs,
+ dep.second,
+ hold_pkgs,
+ dep_pkgs,
+ visited_dpts))
+ {
+ return a;
+ }
+ }
- // Translate the "no change" result into nullopt.
- //
- assert (!r || !r->unused);
- return r && r->available == nullptr ? nullopt : r;
+ return nullopt;
}
- static void
+ // Return false if the plan execution was noop. If unsatisfied dependents
+ // are specified then we are in the simulation mode.
+ //
+ static bool
execute_plan (const pkg_build_options&,
- const dir_path&,
- database&,
build_package_list&,
- bool simulate);
+ unsatisfied_dependents* simulate,
+ const function<find_database_function>&);
using pkg_options = pkg_build_pkg_options;
@@ -2338,20 +2938,39 @@ namespace bpkg
dr << fail << "both --immediate|-i and --recursive|-r specified";
// The --immediate or --recursive option can only be specified with an
- // explicit --upgrade or --patch.
+ // explicit --upgrade, --patch, or --deorphan.
//
if (const char* n = (o.immediate () ? "--immediate" :
o.recursive () ? "--recursive" : nullptr))
{
- if (!o.upgrade () && !o.patch ())
- dr << fail << n << " requires explicit --upgrade|-u or --patch|-p";
+ if (!o.upgrade () && !o.patch () && !o.deorphan ())
+ dr << fail << n << " requires explicit --upgrade|-u, --patch|-p, or "
+ << "--deorphan";
}
if (((o.upgrade_immediate () ? 1 : 0) +
(o.upgrade_recursive () ? 1 : 0) +
(o.patch_immediate () ? 1 : 0) +
(o.patch_recursive () ? 1 : 0)) > 1)
- fail << "multiple --(upgrade|patch)-(immediate|recursive) specified";
+ dr << fail << "multiple --(upgrade|patch)-(immediate|recursive) "
+ << "specified";
+
+ if (o.deorphan_immediate () && o.deorphan_recursive ())
+ dr << fail << "both --deorphan-immediate and --deorphan-recursive "
+ << "specified";
+
+ if (multi_config ())
+ {
+ if (const char* opt = o.config_name_specified () ? "--config-name" :
+ o.config_id_specified () ? "--config-id" :
+ nullptr)
+ {
+ dr << fail << opt << " specified for multiple current "
+ << "configurations" <<
+ info << "use --config-uuid to specify configurations in "
+ << "this mode";
+ }
+ }
if (!dr.empty () && !pkg.empty ())
dr << info << "while validating options for " << pkg;
@@ -2366,13 +2985,16 @@ namespace bpkg
dst.recursive (src.recursive ());
// If -r|-i was specified at the package level, then so should
- // -u|-p.
+ // -u|-p and --deorphan.
//
if (!(dst.upgrade () || dst.patch ()))
{
dst.upgrade (src.upgrade ());
dst.patch (src.patch ());
}
+
+ if (!dst.deorphan ())
+ dst.deorphan (src.deorphan ());
}
if (!(dst.upgrade_immediate () || dst.upgrade_recursive () ||
@@ -2384,8 +3006,15 @@ namespace bpkg
dst.patch_recursive (src.patch_recursive ());
}
+ if (!(dst.deorphan_immediate () || dst.deorphan_recursive ()))
+ {
+ dst.deorphan_immediate (src.deorphan_immediate ());
+ dst.deorphan_recursive (src.deorphan_recursive ());
+ }
+
dst.dependency (src.dependency () || dst.dependency ());
dst.keep_out (src.keep_out () || dst.keep_out ());
+ dst.disfigure (src.disfigure () || dst.disfigure ());
if (!dst.checkout_root_specified () && src.checkout_root_specified ())
{
@@ -2394,23 +3023,54 @@ namespace bpkg
}
dst.checkout_purge (src.checkout_purge () || dst.checkout_purge ());
+
+ if (src.config_id_specified ())
+ {
+ const vector<uint64_t>& s (src.config_id ());
+ vector<uint64_t>& d (dst.config_id ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_id_specified (true);
+ }
+
+ if (src.config_name_specified ())
+ {
+ const strings& s (src.config_name ());
+ strings& d (dst.config_name ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_name_specified (true);
+ }
+
+ if (src.config_uuid_specified ())
+ {
+ const vector<uuid>& s (src.config_uuid ());
+ vector<uuid>& d (dst.config_uuid ());
+ d.insert (d.end (), s.begin (), s.end ());
+
+ dst.config_uuid_specified (true);
+ }
}
static bool
compare_options (const pkg_options& x, const pkg_options& y)
{
- return x.keep_out () == y.keep_out () &&
- x.dependency () == y.dependency () &&
- x.upgrade () == y.upgrade () &&
- x.patch () == y.patch () &&
- x.immediate () == y.immediate () &&
- x.recursive () == y.recursive () &&
- x.upgrade_immediate () == y.upgrade_immediate () &&
- x.upgrade_recursive () == y.upgrade_recursive () &&
- x.patch_immediate () == y.patch_immediate () &&
- x.patch_recursive () == y.patch_recursive () &&
- x.checkout_root () == y.checkout_root () &&
- x.checkout_purge () == y.checkout_purge ();
+ return x.keep_out () == y.keep_out () &&
+ x.disfigure () == y.disfigure () &&
+ x.dependency () == y.dependency () &&
+ x.upgrade () == y.upgrade () &&
+ x.patch () == y.patch () &&
+ x.deorphan () == y.deorphan () &&
+ x.immediate () == y.immediate () &&
+ x.recursive () == y.recursive () &&
+ x.upgrade_immediate () == y.upgrade_immediate () &&
+ x.upgrade_recursive () == y.upgrade_recursive () &&
+ x.patch_immediate () == y.patch_immediate () &&
+ x.patch_recursive () == y.patch_recursive () &&
+ x.deorphan_immediate () == y.deorphan_immediate () &&
+ x.deorphan_recursive () == y.deorphan_recursive () &&
+ x.checkout_root () == y.checkout_root () &&
+ x.checkout_purge () == y.checkout_purge ();
}
int
@@ -2418,21 +3078,97 @@ namespace bpkg
{
tracer trace ("pkg_build");
- const dir_path& c (o.directory ());
- l4 ([&]{trace << "configuration: " << c;});
+ dir_paths cs;
+ const dir_paths& config_dirs (!o.directory ().empty ()
+ ? o.directory ()
+ : cs);
- validate_options (o, ""); // Global package options.
+ if (config_dirs.empty ())
+ cs.push_back (current_dir);
+
+ l4 ([&]{for (const auto& d: config_dirs) trace << "configuration: " << d;});
+
+ // Make sure that potential stdout writing failures can be detected.
+ //
+ cout.exceptions (ostream::badbit | ostream::failbit);
+
+ if (o.noop_exit_specified ())
+ {
+ if (o.print_only ())
+ fail << "--noop-exit specified with --print-only";
+
+ // We can probably use build2's --structured-result to support this.
+ //
+ if (!o.configure_only ())
+ fail << "--noop-exit is only supported in --configure-only mode";
+ }
if (o.update_dependent () && o.leave_dependent ())
fail << "both --update-dependent|-U and --leave-dependent|-L "
<< "specified" <<
info << "run 'bpkg help pkg-build' for more information";
- if (!args.more () && !o.upgrade () && !o.patch ())
+ if (o.sys_no_query () && o.sys_install ())
+ fail << "both --sys-no-query and --sys-install specified" <<
+ info << "run 'bpkg help pkg-build' for more information";
+
+ if (!args.more () && !o.upgrade () && !o.patch () && !o.deorphan ())
fail << "package name argument expected" <<
info << "run 'bpkg help pkg-build' for more information";
- database db (open (c, trace)); // Also populates the system repository.
+ // If multiple current configurations are specified, then open the first
+ // one, attach the remaining, verify that their schemas match (which may
+ // not be the case if they don't belong to the same linked database
+ // cluster), and attach their explicitly linked databases, recursively.
+ //
+ // Also populates the system repository.
+ //
+ // @@ Note that currently we don't verify the specified configurations
+ // belong to the same cluster.
+ //
+ database mdb (config_dirs[0],
+ trace,
+ true /* pre_attach */,
+ true /* sys_rep */,
+ dir_paths () /* pre_link */,
+ (config_dirs.size () == 1
+ ? empty_string
+ : '[' + config_dirs[0].representation () + ']'));
+
+ // Command line as a dependent.
+ //
+ package_version_key cmd_line (mdb, "command line");
+
+ current_configs.push_back (mdb);
+
+ if (config_dirs.size () != 1)
+ {
+ transaction t (mdb);
+
+ odb::schema_version sv (mdb.schema_version ());
+ for (auto i (config_dirs.begin () + 1); i != config_dirs.end (); ++i)
+ {
+ database& db (mdb.attach (normalize (*i, "configuration"),
+ true /* sys_rep */));
+
+ if (db.schema_version () != sv)
+ fail << "specified configurations belong to different linked "
+ << "configuration clusters" <<
+ info << mdb.config_orig <<
+ info << db.config_orig;
+
+ db.attach_explicit (true /* sys_rep */);
+
+ // Suppress duplicates.
+ //
+ if (!current (db))
+ current_configs.push_back (db);
+ }
+
+ t.commit ();
+ }
+
+ validate_options (o, ""); // Global package options.
// Note that the session spans all our transactions. The idea here is that
// selected_package objects in build_packages below will be cached in this
@@ -2440,7 +3176,7 @@ namespace bpkg
// will modify the cached instance, which means our list will always "see"
// their updated state.
//
- // Also note that rep_fetch() must be called in session.
+ // Also note that rep_fetch() and pkg_fetch() must be called in session.
//
session ses;
@@ -2452,12 +3188,24 @@ namespace bpkg
// duplicates. Note that the last repository location overrides the
// previous ones with the same canonical name.
//
+ // Also note that the dependency specs may not have the repository
+ // location specified, since they obtain the repository information via
+ // their ultimate dependent configurations.
+ //
+ // Also collect the databases specified on the command line for the held
+ // packages, to later use them as repository information sources for the
+ // dependencies. Additionally use the current configurations as repository
+ // information sources.
+ //
+ repo_configs = current_configs;
+
struct pkg_spec
{
- string packages;
- repository_location location;
- pkg_options options;
- strings config_vars;
+ reference_wrapper<database> db;
+ string packages;
+ repository_location location;
+ pkg_options options;
+ strings config_vars;
};
vector<pkg_spec> specs;
@@ -2498,16 +3246,16 @@ namespace bpkg
fail << "unexpected options group for configuration variable '"
<< v << "'";
- cvars.push_back (move (v));
+ cvars.push_back (move (trim (v)));
}
if (!cvars.empty () && !sep)
fail << "configuration variables must be separated from packages "
<< "with '--'";
- vector<repository_location> locations;
+ database_map<vector<repository_location>> locations;
- transaction t (db);
+ transaction t (mdb);
while (args.more ())
{
@@ -2520,28 +3268,33 @@ namespace bpkg
fail << "unexpected configuration variable '" << a << "'" <<
info << "use the '--' separator to treat it as a package";
- specs.emplace_back ();
- pkg_spec& ps (specs.back ());
+ pkg_options po;
+
+ // Merge the common and package-specific configuration variables
+ // (commons go first).
+ //
+ strings cvs (cvars);
try
{
- auto& po (ps.options);
-
cli::scanner& ag (args.group ());
- po.parse (ag, cli::unknown_mode::fail, cli::unknown_mode::stop);
-
- // Merge the common and package-specific configuration variables
- // (commons go first).
- //
- ps.config_vars = cvars;
while (ag.more ())
{
- string a (ag.next ());
- if (a.find ('=') == string::npos)
- fail << "unexpected group argument '" << a << "'";
+ if (!po.parse (ag) || ag.more ())
+ {
+ string a (ag.next ());
+ if (a.find ('=') == string::npos)
+ fail << "unexpected group argument '" << a << "'";
- ps.config_vars.push_back (move (a));
+ trim (a);
+
+ if (a[0] == '!')
+ fail << "global override in package-specific configuration "
+ << "variable '" << a << "'";
+
+ cvs.push_back (move (a));
+ }
}
// We have to manually merge global options into local since just
@@ -2554,15 +3307,80 @@ namespace bpkg
}
catch (const cli::exception& e)
{
- fail << e << " grouped for argument '" << a << "'";
+ fail << e << " grouped for argument " << a;
+ }
+
+ // Resolve the configuration options into the databases, suppressing
+ // duplicates.
+ //
+ // Note: main database if no --config-* option is specified, unless we
+ // are in the multi-config mode, in which case we fail.
+ //
+ linked_databases dbs;
+ auto add_db = [&dbs] (database& db)
+ {
+ if (find (dbs.begin (), dbs.end (), db) == dbs.end ())
+ dbs.push_back (db);
+ };
+
+ for (const string& nm: po.config_name ())
+ {
+ assert (!multi_config ()); // Should have failed earlier.
+ add_db (mdb.find_attached (nm));
}
+ for (uint64_t id: po.config_id ())
+ {
+ assert (!multi_config ()); // Should have failed earlier.
+ add_db (mdb.find_attached (id));
+ }
+
+ for (const uuid& uid: po.config_uuid ())
+ {
+ database* db (nullptr);
+
+ for (database& cdb: current_configs)
+ {
+ if ((db = cdb.try_find_dependency_config (uid)) != nullptr)
+ break;
+ }
+
+ if (db == nullptr)
+ fail << "no configuration with uuid " << uid << " is linked with "
+ << (!multi_config ()
+ ? mdb.config_orig.representation ()
+ : "specified current configurations");
+
+ add_db (*db);
+ }
+
+ // Note that unspecified package configuration in the multi-
+ // configurations mode is an error, unless this is a system
+ // dependency. We, however, do not parse the package scheme at this
+ // stage and so delay the potential failure.
+ //
+ if (dbs.empty ())
+ dbs.push_back (mdb);
+
if (!a.empty () && a[0] == '?')
{
- ps.options.dependency (true);
+ po.dependency (true);
a.erase (0, 1);
}
+ // If this is a package to hold, then add its databases to the
+ // repository information source list, suppressing duplicates.
+ //
+ if (!po.dependency ())
+ {
+ for (database& db: dbs)
+ {
+ if (find (repo_configs.begin (), repo_configs.end (), db) ==
+ repo_configs.end ())
+ repo_configs.push_back (db);
+ }
+ }
+
// Check if the argument has the [<packages>]@<location> form or looks
// like a URL. Find the position of <location> if that's the case and
// set it to string::npos otherwise.
@@ -2603,96 +3421,162 @@ namespace bpkg
if (l.empty ())
fail << "empty repository location in '" << a << "'";
- // Search for the repository location in the database before trying
- // to parse it. Note that the straight parsing could otherwise fail,
- // being unable to properly guess the repository type.
- //
- // Also note that the repository location URL is not unique and we
- // can potentially end up with multiple repositories. For example:
- //
- // $ bpkg add git+file:/path/to/git/repo dir+file:/path/to/git/repo
- // $ bpkg build @/path/to/git/repo
- //
- // That's why we pick the repository only if there is exactly one
- // match.
- //
- shared_ptr<repository> r;
+ if (po.dependency ())
+ fail << "unexpected repository location in '?" << a << "'" <<
+ info << "repository location cannot be specified for "
+ << "dependencies";
+
+ string pks (p > 1 ? string (a, 0, p - 1) : empty_string);
+
+ for (size_t i (0); i != dbs.size (); ++i)
{
- using query = query<repository>;
+ database& db (dbs[i]);
- // For case-insensitive filesystems (Windows) we need to match the
- // location case-insensitively against the local repository URLs
- // and case-sensitively against the remote ones.
+ // Search for the repository location in the database before
+ // trying to parse it. Note that the straight parsing could
+ // otherwise fail, being unable to properly guess the repository
+ // type.
//
- // Note that the root repository will never be matched, since its
- // location is empty.
+ // Also note that the repository location URL is not unique and we
+ // can potentially end up with multiple repositories. For example:
//
- const auto& url (query::location.url);
+ // $ bpkg add git+file:/path/to/git/repo dir+file:/path/to/git/repo
+ // $ bpkg build @/path/to/git/repo
+ //
+ // That's why we pick the repository only if there is exactly one
+ // match.
+ //
+ shared_ptr<repository> r;
+ {
+ using query = query<repository>;
+
+ // For case-insensitive filesystems (Windows) we need to match
+ // the location case-insensitively against the local repository
+ // URLs and case-sensitively against the remote ones.
+ //
+ // Note that the root repository will never be matched, since
+ // its location is empty.
+ //
+ const auto& url (query::location.url);
#ifndef _WIN32
- query q (url == l);
+ query q (url == l);
#else
- string u (url.table ());
- u += '.';
- u += url.column ();
+ string u (url.table ());
+ u += '.';
+ u += url.column ();
- query q (
- (!query::local && url == l) ||
- ( query::local && u + " COLLATE nocase = " + query::_val (l)));
+ query q (
+ (!query::local && url == l) ||
+ ( query::local && u + " COLLATE nocase = " + query::_val (l)));
#endif
- auto rs (db.query<repository> (q));
- auto i (rs.begin ());
+ auto rs (db.query<repository> (q));
+ auto i (rs.begin ());
- if (i != rs.end ())
- {
- r = i.load ();
+ if (i != rs.end ())
+ {
+ r = i.load ();
- // Fallback to parsing the location if several repositories
- // match.
- //
- if (++i != rs.end ())
- r = nullptr;
+ // Fallback to parsing the location if several repositories
+ // match.
+ //
+ if (++i != rs.end ())
+ r = nullptr;
+ }
}
- }
-
- ps.location = r != nullptr
- ? r->location
- : parse_location (l, nullopt /* type */);
- if (p > 1)
- ps.packages = string (a, 0, p - 1);
+ repository_location loc (r != nullptr
+ ? r->location
+ : parse_location (l, nullopt /* type */));
- if (!o.no_fetch ())
- {
- auto pr = [&ps] (const repository_location& i) -> bool
+ if (!o.no_fetch ())
{
- return i.canonical_name () == ps.location.canonical_name ();
- };
+ auto i (locations.find (db));
+ if (i == locations.end ())
+ i = locations.insert (db,
+ vector<repository_location> ()).first;
+
+ auto pr = [&loc] (const repository_location& i) -> bool
+ {
+ return i.canonical_name () == loc.canonical_name ();
+ };
- auto i (find_if (locations.begin (), locations.end (), pr));
+ vector<repository_location>& ls (i->second);
+ auto j (find_if (ls.begin (), ls.end (), pr));
- if (i != locations.end ())
- *i = ps.location;
+ if (j != ls.end ())
+ *j = loc;
+ else
+ ls.push_back (loc);
+ }
+
+ // Move the pkg_spec components for the last database in the list,
+ // rather than copying them.
+ //
+ if (i != dbs.size () - 1)
+ specs.push_back (pkg_spec {db, pks, move (loc), po, cvs});
else
- locations.push_back (ps.location);
+ specs.push_back (pkg_spec {db,
+ move (pks),
+ move (loc),
+ move (po),
+ move (cvs)});
}
}
else
- ps.packages = move (a);
+ {
+ // Move the pkg_spec components for the last database in the list,
+ // rather than copying them.
+ //
+ for (size_t i (0); i != dbs.size (); ++i)
+ {
+ database& db (dbs[i]);
+
+ if (i != dbs.size () - 1)
+ specs.emplace_back (pkg_spec {db,
+ a,
+ repository_location (),
+ po,
+ cvs});
+ else
+ specs.emplace_back (pkg_spec {db,
+ move (a),
+ repository_location (),
+ move (po),
+ move (cvs)});
+ }
+ }
}
t.commit ();
- if (!locations.empty ())
+ // Initialize tmp directories.
+ //
+ for (database& db: repo_configs)
+ init_tmp (db.config_orig);
+
+ // Fetch the repositories in the current configuration.
+ //
+ // Note that during this build only the repositories information from
+ // the main database will be used.
+ //
+ for (const auto& l: locations)
rep_fetch (o,
- c,
- db,
- locations,
+ l.first,
+ l.second,
o.fetch_shallow (),
string () /* reason for "fetching ..." */);
}
+ // Now, as repo_configs is filled and the repositories are fetched, mask
+ // the repositories, if any.
+ //
+ if (o.mask_repository_specified () || o.mask_repository_uuid_specified ())
+ rep_mask (o.mask_repository (),
+ o.mask_repository_uuid (),
+ current_configs);
+
// Expand the package specs into individual package args, parsing them
// into the package scheme, name, and version constraint components, and
// also saving associated options and configuration variables.
@@ -2704,65 +3588,22 @@ namespace bpkg
//
struct pkg_arg
{
+ // NULL for system dependency with unspecified configuration.
+ //
+ database* db;
+
package_scheme scheme;
package_name name;
optional<version_constraint> constraint;
string value;
pkg_options options;
strings config_vars;
- };
-
- // Create the parsed package argument.
- //
- auto arg_package = [] (package_scheme sc,
- package_name nm,
- optional<version_constraint> vc,
- pkg_options os,
- strings vs) -> pkg_arg
- {
- assert (!vc || !vc->empty ()); // May not be empty if present.
-
- pkg_arg r {sc, move (nm), move (vc), string (), move (os), move (vs)};
-
- switch (sc)
- {
- case package_scheme::sys:
- {
- if (!r.constraint)
- r.constraint = version_constraint (wildcard_version);
-
- // The system package may only have an exact/wildcard version
- // specified.
- //
- assert (r.constraint->min_version == r.constraint->max_version);
-
- const system_package* sp (system_repository.find (r.name));
-
- // Will deal with all the duplicates later.
- //
- if (sp == nullptr || !sp->authoritative)
- system_repository.insert (r.name,
- *r.constraint->min_version,
- true /* authoritative */);
-
- break;
- }
- case package_scheme::none: break; // Nothing to do.
- }
- return r;
- };
-
- // Create the unparsed package argument.
- //
- auto arg_raw = [] (string v, pkg_options os, strings vs) -> pkg_arg
- {
- return pkg_arg {package_scheme::none,
- package_name (),
- nullopt /* constraint */,
- move (v),
- move (os),
- move (vs)};
+ // If scheme is sys then this member indicates whether the constraint
+ // came from the system package manager (not NULL) or user/fallback
+ // (NULL).
+ //
+ const system_package_status* system_status;
};
auto arg_parsed = [] (const pkg_arg& a) {return !a.name.empty ();};
@@ -2824,23 +3665,41 @@ namespace bpkg
append (v, s);
};
+ auto add_num = [&add_string] (const char* o, auto v)
+ {
+ add_string (o, to_string (v));
+ };
+
const pkg_options& o (a.options);
- add_bool ("--keep-out", o.keep_out ());
- add_bool ("--upgrade", o.upgrade ());
- add_bool ("--patch", o.patch ());
- add_bool ("--immediate", o.immediate ());
- add_bool ("--recursive", o.recursive ());
- add_bool ("--upgrade-immediate", o.upgrade_immediate ());
- add_bool ("--upgrade-recursive", o.upgrade_recursive ());
- add_bool ("--patch-immediate", o.patch_immediate ());
- add_bool ("--patch-recursive", o.patch_recursive ());
+ add_bool ("--keep-out", o.keep_out ());
+ add_bool ("--disfigure", o.disfigure ());
+ add_bool ("--upgrade", o.upgrade ());
+ add_bool ("--patch", o.patch ());
+ add_bool ("--deorphan", o.deorphan ());
+ add_bool ("--immediate", o.immediate ());
+ add_bool ("--recursive", o.recursive ());
+ add_bool ("--upgrade-immediate", o.upgrade_immediate ());
+ add_bool ("--upgrade-recursive", o.upgrade_recursive ());
+ add_bool ("--patch-immediate", o.patch_immediate ());
+ add_bool ("--patch-recursive", o.patch_recursive ());
+ add_bool ("--deorphan-immediate", o.deorphan_immediate ());
+ add_bool ("--deorphan-recursive", o.deorphan_recursive ());
if (o.checkout_root_specified ())
add_string ("--checkout-root", o.checkout_root ().string ());
add_bool ("--checkout-purge", o.checkout_purge ());
+ for (const string& nm: o.config_name ())
+ add_string ("--config-name", nm);
+
+ for (uint64_t id: o.config_id ())
+ add_num ("--config-id", id);
+
+ for (const uuid& uid: o.config_uuid ())
+ add_string ("--config-uuid", uid.string ());
+
// Compose the option/variable group.
//
if (!s.empty () || !a.config_vars.empty ())
@@ -2863,6 +3722,222 @@ namespace bpkg
return r;
};
+ // Figure out the system package version unless explicitly specified and
+ // add the system package authoritative information to the database's
+ // system repository unless the database is NULL or it already contains
+ // authoritative information for this package. Return the figured out
+ // system package version as constraint.
+ //
+ // Note that it is assumed that all the possible duplicates are handled
+ // elsewhere/later.
+ //
+ auto add_system_package = [&o] (database* db,
+ const package_name& nm,
+ optional<version_constraint> vc,
+ const system_package_status* sps,
+ vector<shared_ptr<available_package>>* stubs)
+ -> pair<version_constraint, const system_package_status*>
+ {
+ if (!vc)
+ {
+ assert (sps == nullptr);
+
+ // See if we should query the system package manager.
+ //
+ if (!sys_pkg_mgr)
+ sys_pkg_mgr = o.sys_no_query ()
+ ? nullptr
+ : make_consumption_system_package_manager (o,
+ host_triplet,
+ o.sys_distribution (),
+ o.sys_architecture (),
+ o.sys_install (),
+ !o.sys_no_fetch (),
+ o.sys_yes (),
+ o.sys_sudo ());
+ if (*sys_pkg_mgr != nullptr)
+ {
+ system_package_manager& spm (**sys_pkg_mgr);
+
+ // First check the cache.
+ //
+ optional<const system_package_status*> os (spm.status (nm, nullptr));
+
+ available_packages aps;
+ if (!os)
+ {
+ // If no cache hit, then collect the available packages for the
+ // mapping information.
+ //
+ aps = find_available_all (current_configs, nm);
+
+ // If no source/stub for the package (and thus no mapping), issue
+ // diagnostics consistent with other such places unless explicitly
+ // allowed by the user.
+ //
+ if (aps.empty ())
+ {
+ if (!o.sys_no_stub ())
+ fail << "unknown package " << nm <<
+ info << "consider specifying --sys-no-stub or " << nm << "/*";
+
+ // Add the stub package to the imaginary system repository (like
+ // the user-specified case below).
+ //
+ if (stubs != nullptr)
+ stubs->push_back (make_shared<available_package> (nm));
+ }
+ }
+
+ // This covers both our diagnostics below as well as anything that
+ // might be issued by status().
+ //
+ auto df = make_diag_frame (
+ [&nm] (diag_record& dr)
+ {
+ dr << info << "specify " << nm << "/* if package is not "
+ << "installed with system package manager";
+
+ dr << info << "specify --sys-no-query to disable system "
+ << "package manager interactions";
+ });
+
+ if (!os)
+ {
+ os = spm.status (nm, &aps);
+ assert (os);
+ }
+
+ if ((sps = *os) != nullptr)
+ vc = version_constraint (sps->version);
+ else
+ {
+ diag_record dr (fail);
+
+ dr << "no installed " << (o.sys_install () ? "or available " : "")
+ << "system package for " << nm;
+
+ if (!o.sys_install ())
+ dr << info << "specify --sys-install to try to install it";
+ }
+ }
+ else
+ vc = version_constraint (wildcard_version);
+ }
+ else
+ {
+ // The system package may only have an exact/wildcard version
+ // specified.
+ //
+ assert (vc->min_version == vc->max_version);
+
+ // For system packages not associated with a specific repository
+ // location add the stub package to the imaginary system repository
+ // (see below for details).
+ //
+ if (stubs != nullptr)
+ stubs->push_back (make_shared<available_package> (nm));
+ }
+
+ if (db != nullptr)
+ {
+ assert (db->system_repository);
+
+ const system_package* sp (db->system_repository->find (nm));
+
+ // Note that we don't check for the version match here since that's
+ // handled by check_dup() lambda at a later stage, which covers both
+ // db and no-db cases consistently.
+ //
+ if (sp == nullptr || !sp->authoritative)
+ db->system_repository->insert (nm,
+ *vc->min_version,
+ true /* authoritative */,
+ sps);
+ }
+
+ return make_pair (move (*vc), sps);
+ };
+
+ // Create the parsed package argument. Issue diagnostics and fail if the
+ // package specification is invalid.
+ //
+ auto arg_package = [&arg_string, &add_system_package]
+ (database* db,
+ package_scheme sc,
+ package_name nm,
+ optional<version_constraint> vc,
+ pkg_options os,
+ strings vs,
+ vector<shared_ptr<available_package>>* stubs = nullptr)
+ -> pkg_arg
+ {
+ assert (!vc || !vc->empty ()); // May not be empty if present.
+
+ if (db == nullptr)
+ assert (sc == package_scheme::sys && os.dependency ());
+
+ pkg_arg r {db,
+ sc,
+ move (nm),
+ move (vc),
+ string () /* value */,
+ move (os),
+ move (vs),
+ nullptr /* system_status */};
+
+ // Verify that the package database is specified in the multi-config
+ // mode, unless this is a system dependency package.
+ //
+ if (multi_config () &&
+ !os.config_uuid_specified () &&
+ !(db == nullptr &&
+ sc == package_scheme::sys &&
+ os.dependency ()))
+ fail << "no configuration specified for " << arg_string (r) <<
+ info << "configuration must be explicitly specified for each "
+ << "package in multi-configurations mode" <<
+ info << "use --config-uuid to specify its configuration";
+
+ switch (sc)
+ {
+ case package_scheme::sys:
+ {
+ assert (stubs != nullptr);
+
+ auto sp (add_system_package (db,
+ r.name,
+ move (r.constraint),
+ nullptr /* system_package_status */,
+ stubs));
+
+ r.constraint = move (sp.first);
+ r.system_status = sp.second;
+ break;
+ }
+ case package_scheme::none: break; // Nothing to do.
+ }
+
+ return r;
+ };
+
+ // Create the unparsed package argument.
+ //
+ auto arg_raw = [] (database& db,
+ string v,
+ pkg_options os,
+ strings vs) -> pkg_arg
+ {
+ return pkg_arg {&db,
+ package_scheme::none,
+ package_name (),
+ nullopt /* constraint */,
+ move (v),
+ move (os),
+ move (vs),
+ nullptr /* system_status */};
+ };
+
vector<pkg_arg> pkg_args;
{
// Cache the system stubs to create the imaginary system repository at
@@ -2873,18 +3948,18 @@ namespace bpkg
//
vector<shared_ptr<available_package>> stubs;
- transaction t (db);
+ transaction t (mdb);
// Don't fold the zero revision if building the package from source so
// that we build the exact X+0 package revision if it is specified.
//
- auto fold_zero_rev = [] (package_scheme sc)
+ auto version_flags = [] (package_scheme sc)
{
- bool r (false);
+ version::flags r (version::none);
switch (sc)
{
- case package_scheme::none: r = false; break;
- case package_scheme::sys: r = true; break;
+ case package_scheme::none: r = version::none; break;
+ case package_scheme::sys: r = version::fold_zero_revision; break;
}
return r;
};
@@ -2921,39 +3996,51 @@ namespace bpkg
optional<version_constraint> vc (
parse_package_version_constraint (
- s, sys, fold_zero_rev (sc), version_only (sc)));
+ s, sys, version_flags (sc), version_only (sc)));
- // For system packages not associated with a specific repository
- // location add the stub package to the imaginary system
- // repository (see above for details).
- //
- if (sys && vc)
- stubs.push_back (make_shared<available_package> (n));
+ pkg_options& o (ps.options);
- pkg_args.push_back (arg_package (sc,
+ // Disregard the (main) database for a system dependency with
+ // unspecified configuration.
+ //
+ bool no_db (sys &&
+ o.dependency () &&
+ !o.config_name_specified () &&
+ !o.config_id_specified () &&
+ !o.config_uuid_specified ());
+
+ pkg_args.push_back (arg_package (no_db ? nullptr : &ps.db.get (),
+ sc,
move (n),
move (vc),
- move (ps.options),
- move (ps.config_vars)));
+ move (o),
+ move (ps.config_vars),
+ &stubs));
}
else // Add unparsed.
- pkg_args.push_back (arg_raw (move (ps.packages),
+ pkg_args.push_back (arg_raw (ps.db,
+ move (ps.packages),
move (ps.options),
move (ps.config_vars)));
continue;
}
+ // Use it both as the package database and the source of the
+ // repository information.
+ //
+ database& pdb (ps.db);
+
// Expand the [[<packages>]@]<location> spec. Fail if the repository
// is not found in this configuration, that can be the case in the
// presence of --no-fetch option.
//
shared_ptr<repository> r (
- db.find<repository> (ps.location.canonical_name ()));
+ pdb.find<repository> (ps.location.canonical_name ()));
if (r == nullptr)
- fail << "repository '" << ps.location
- << "' does not exist in this configuration";
+ fail << "repository '" << ps.location << "' does not exist in this "
+ << "configuration";
// If no packages are specified explicitly (the argument starts with
// '@' or is a URL) then we select latest versions of all the packages
@@ -2972,7 +4059,7 @@ namespace bpkg
{
using query = query<repository_fragment_package>;
- for (const auto& rp: db.query<repository_fragment_package> (
+ for (const auto& rp: pdb.query<repository_fragment_package> (
(query::repository_fragment::name ==
rf.fragment.load ()->name) +
order_by_version_desc (query::package::id.version)))
@@ -2987,7 +4074,7 @@ namespace bpkg
if (ps.options.patch ())
{
shared_ptr<selected_package> sp (
- db.find<selected_package> (nm));
+ pdb.find<selected_package> (nm));
// It seems natural in the presence of --patch option to only
// patch the selected packages and not to build new packages if
@@ -3038,7 +4125,8 @@ namespace bpkg
info << "package " << pv.first << " is not present in "
<< "configuration";
else
- pkg_args.push_back (arg_package (package_scheme::none,
+ pkg_args.push_back (arg_package (&pdb,
+ package_scheme::none,
pv.first,
version_constraint (pv.second),
ps.options,
@@ -3065,7 +4153,7 @@ namespace bpkg
optional<version_constraint> vc (
parse_package_version_constraint (
- s, sys, fold_zero_rev (sc), version_only (sc)));
+ s, sys, version_flags (sc), version_only (sc)));
// Check if the package is present in the repository and its
// complements, recursively. If the version is not specified then
@@ -3099,7 +4187,7 @@ namespace bpkg
if (!vc)
{
if (ps.options.patch () &&
- (sp = db.find<selected_package> (n)) != nullptr)
+ (sp = pdb.find<selected_package> (n)) != nullptr)
{
c = patch_constraint (sp);
@@ -3115,7 +4203,7 @@ namespace bpkg
}
shared_ptr<available_package> ap (
- find_available_one (db, n, c, rfs, false /* prereq */).first);
+ find_available_one (pdb, n, c, rfs, false /* prereq */).first);
// Fail if no available package is found or only a stub is
// available and we are building a source package.
@@ -3127,7 +4215,7 @@ namespace bpkg
// If the selected package is loaded then we aim to patch it.
//
if (sp != nullptr)
- dr << "patch version for " << *sp << " is not found in "
+ dr << "patch version for " << *sp << pdb << " is not found in "
<< r->name;
else if (ap == nullptr)
dr << "package " << pkg << " is not found in " << r->name;
@@ -3152,11 +4240,17 @@ namespace bpkg
// Don't move options and variables as they may be reused.
//
- pkg_args.push_back (arg_package (sc,
+ // Note that this cannot be a system dependency with unspecified
+ // configuration since location is specified and so we always pass
+ // the database to the constructor.
+ //
+ pkg_args.push_back (arg_package (&pdb,
+ sc,
move (n),
move (vc),
ps.options,
- ps.config_vars));
+ ps.config_vars,
+ &stubs));
}
}
}
@@ -3166,6 +4260,10 @@ namespace bpkg
imaginary_stubs = move (stubs);
}
+ // List of package configurations specified on the command line.
+ //
+ vector<package_key> pkg_confs;
+
// Separate the packages specified on the command line into to hold and to
// up/down-grade as dependencies, and save dependents whose dependencies
// must be upgraded recursively.
@@ -3174,18 +4272,64 @@ namespace bpkg
dependency_packages dep_pkgs;
recursive_packages rec_pkgs;
+ // Note that the command line adjustments which resolve the unsatisfied
+ // dependent issue (see unsatisfied_dependents for details) may
+ // potentially be sub-optimal, since we do not perform the full
+ // backtracking by trying all the possible adjustments and picking the
+ // most optimal combination. Instead, we keep collecting adjustments until
+ // either the package builds collection succeeds or there are no more
+ // adjustment combinations to try (and we don't try all of them). As a
+ // result we, for example, may end up with some redundant constraints on
+ // the command line just because the respective dependents have been
+ // evaluated first. Generally, dropping all the redundant adjustments can
+ // potentially be quite time-consuming, since we would need to try
+ // dropping all their possible combinations. We, however, will implement
+ // the refinement for only the common case (adjustments are independent),
+ // trying to drop just one adjustment per the refinement cycle iteration
+ // and wait and see how it goes.
+ //
+ cmdline_adjustments cmdline_adjs (hold_pkgs, dep_pkgs);
+
+ // If both are present, then we are in the command line adjustments
+ // refinement cycle, where cmdline_refine_adjustment is the adjustment
+ // being currently dropped and cmdline_refine_index is its index on the
+ // stack (as it appears at the beginning of the cycle).
+ //
+ optional<cmdline_adjustment> cmdline_refine_adjustment;
+ optional<size_t> cmdline_refine_index;
+
{
// Check if the package is a duplicate. Return true if it is but
// harmless.
//
- map<package_name, pkg_arg> package_map;
+ struct sys_package_key // Like package_key but with NULL'able db.
+ {
+ package_name name;
+ database* db; // Can be NULL for system dependency.
- auto check_dup = [&package_map, &arg_string, arg_parsed] (
- const pkg_arg& pa) -> bool
+ sys_package_key (package_name n, database* d)
+ : name (move (n)), db (d) {}
+
+ bool
+ operator< (const sys_package_key& v) const
+ {
+ if (int r = name.compare (v.name))
+ return r < 0;
+
+ return db != nullptr && v.db != nullptr ? *db < *v.db :
+ db == nullptr && v.db == nullptr ? false :
+ db == nullptr;
+ }
+ };
+
+ map<sys_package_key, pkg_arg> package_map;
+
+ auto check_dup = [&package_map, &arg_string, &arg_parsed]
+ (const pkg_arg& pa) -> bool
{
assert (arg_parsed (pa));
- auto r (package_map.emplace (pa.name, pa));
+ auto r (package_map.emplace (sys_package_key {pa.name, pa.db}, pa));
const pkg_arg& a (r.first->second);
assert (arg_parsed (a));
@@ -3200,19 +4344,132 @@ namespace bpkg
if (!r.second &&
(a.scheme != pa.scheme ||
a.name != pa.name ||
+ a.db != pa.db ||
a.constraint != pa.constraint ||
!compare_options (a.options, pa.options) ||
a.config_vars != pa.config_vars))
fail << "duplicate package " << pa.name <<
- info << "first mentioned as " << arg_string (r.first->second) <<
+ info << "first mentioned as " << arg_string (a) <<
info << "second mentioned as " << arg_string (pa);
return !r.second;
};
- transaction t (db);
+ transaction t (mdb);
+
+ // Return the available package that matches the specified orphan best
+ // (see evaluate_dependency() description for details). Also return the
+ // repository fragment the package comes from. Return a pair of NULLs if
+ // no suitable package has been found.
+ //
+ auto find_orphan_match =
+ [] (const shared_ptr<selected_package>& sp,
+ const lazy_shared_ptr<repository_fragment>& root)
+ {
+ using available = pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>>;
+
+ assert (sp != nullptr);
- shared_ptr<repository_fragment> root (db.load<repository_fragment> (""));
+ const package_name& n (sp->name);
+ const version& v (sp->version);
+ optional<version_constraint> vc {version_constraint (v)};
+
+ // Note that non-zero iteration makes a version non-standard, so we
+ // reset it to 0 to produce the patch/minor constraints.
+ //
+ version vr (v.epoch,
+ v.upstream,
+ v.release,
+ v.revision,
+ 0 /* iteration */);
+
+ optional<version_constraint> pc (
+ patch_constraint (n, vr, true /* quiet */));
+
+ optional<version_constraint> mc (
+ minor_constraint (n, vr, true /* quiet */));
+
+ // Note: explicit revision makes query_available() to always consider
+ // revisions (but not iterations) regardless of the revision argument
+ // value.
+ //
+ optional<version_constraint> verc {
+ version_constraint (version (v.epoch,
+ v.upstream,
+ v.release,
+ v.revision ? v.revision : 0,
+ 0 /* iteration */))};
+
+ optional<version_constraint> vlc {
+ version_constraint (version (v.epoch,
+ v.upstream,
+ v.release,
+ nullopt,
+ 0 /* iteration */))};
+
+ // Find the latest available non-stub package, optionally matching a
+ // constraint and considering revision. If a package is found, then
+ // cache it together with the repository fragment it comes from and
+ // return true.
+ //
+ available find_result;
+ const version* find_version (nullptr);
+ auto find = [&n,
+ &root,
+ &find_result,
+ &find_version] (const optional<version_constraint>& c,
+ bool revision = false) -> bool
+ {
+ available r (
+ find_available_one (n, c, root, false /* prereq */, revision));
+
+ const shared_ptr<available_package>& ap (r.first);
+
+ if (ap != nullptr && !ap->stub ())
+ {
+ find_result = move (r);
+ find_version = &find_result.first->version;
+ return true;
+ }
+ else
+ return false;
+ };
+
+ if (// Same version, revision, and iteration.
+ //
+ find (vc, true) ||
+ //
+ // Latest iteration of same version and revision.
+ //
+ find (verc) ||
+ //
+ // Later revision of same version.
+ //
+ (find (vlc) &&
+ find_version->compare (v,
+ false /* revision */,
+ true /* iteration */) > 0) ||
+ //
+ // Later patch of same version.
+ //
+ (pc && find (pc) &&
+ find_version->compare (v, true /* revision */) > 0) ||
+ //
+ // Later minor of same version.
+ //
+ (mc && find (mc) &&
+ find_version->compare (v, true /* revision */) > 0) ||
+ //
+ // Latest available version, including earlier.
+ //
+ find (nullopt))
+ {
+ return find_result;
+ }
+
+ return available ();
+ };
// Here is what happens here: for unparsed package args we are going to
// try and guess whether we are dealing with a package archive, package
@@ -3220,27 +4477,33 @@ namespace bpkg
// then as a directory, and then assume it is name/version. Sometimes,
// however, it is really one of the first two but just broken. In this
// case things are really confusing since we suppress all diagnostics
- // for the first two "guesses". So what we are going to do here is re-run
- // them with full diagnostics if the name/version guess doesn't pan out.
+ // for the first two "guesses". So what we are going to do here is
+ // re-run them with full diagnostics if the name/version guess doesn't
+ // pan out.
//
bool diag (false);
for (auto i (pkg_args.begin ()); i != pkg_args.end (); )
{
- pkg_arg& pa (*i);
+ pkg_arg& pa (*i);
+ database* pdb (pa.db);
// Reduce all the potential variations (archive, directory, package
// name, package name/version) to a single available_package object.
//
- shared_ptr<repository_fragment> af;
+ // Note that the repository fragment is only used for the
+ // build-to-hold packages.
+ //
+ lazy_shared_ptr<repository_fragment> af;
shared_ptr<available_package> ap;
+ bool existing (false); // True if build as an archive or directory.
if (!arg_parsed (pa))
{
- const char* package (pa.value.c_str ());
+ assert (pdb != nullptr); // Unparsed and so can't be system.
- // Is this a package archive?
- //
- bool package_arc (false);
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
+ const char* package (pa.value.c_str ());
try
{
@@ -3255,26 +4518,18 @@ namespace bpkg
pkg_verify (o,
a,
true /* ignore_unknown */,
+ false /* ignore_toolchain */,
false /* expand_values */,
- true /* complete_depends */,
- diag));
+ true /* load_buildfiles */,
+ true /* complete_values */,
+ diag ? 2 : 1));
// This is a package archive.
//
- // Note that throwing failed from here on will be fatal.
- //
- package_arc = true;
-
l4 ([&]{trace << "archive '" << a << "': " << arg_string (pa);});
- // Supporting this would complicate things a bit, but we may add
- // support for it one day.
- //
- if (pa.options.dependency ())
- fail << "package archive '" << a
- << "' may not be built as a dependency";
-
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3283,19 +4538,17 @@ namespace bpkg
af = root;
ap = make_shared<available_package> (move (m));
ap->locations.push_back (package_location {root, move (a)});
+
+ existing_packages.push_back (make_pair (ref (*pdb), ap));
+ existing = true;
}
}
catch (const invalid_path&)
{
// Not a valid path so cannot be an archive.
}
- catch (const failed&)
+ catch (const not_package&)
{
- // If this is a valid package archive but something went wrong
- // afterwards, then we are done.
- //
- if (package_arc)
- throw;
}
// Is this a package directory?
@@ -3309,8 +4562,6 @@ namespace bpkg
size_t pn (strlen (package));
if (pn != 0 && path::traits_type::is_separator (package[pn - 1]))
{
- bool package_dir (false);
-
try
{
dir_path d (package);
@@ -3320,47 +4571,52 @@ namespace bpkg
info << "'" << package << "' does not appear to be a valid "
<< "package directory: ";
+ // For better diagnostics, let's obtain the package info after
+ // pkg_verify() verifies that this is a package directory.
+ //
+ package_version_info pvi;
+
package_manifest m (
pkg_verify (
+ o,
d,
true /* ignore_unknown */,
- [&o, &d] (version& v)
+ false /* ignore_toolchain */,
+ true /* load_buildfiles */,
+ [&o, &d, &pvi] (version& v)
{
- if (optional<version> pv = package_version (o, d))
- v = move (*pv);
+ // Note that we also query subprojects since the package
+ // information will be used for the subsequent
+ // package_iteration() call.
+ //
+ pvi = package_version (o, d, b_info_flags::subprojects);
+
+ if (pvi.version)
+ v = move (*pvi.version);
},
- diag));
+ diag ? 2 : 1));
// This is a package directory.
//
- // Note that throwing failed from here on will be fatal.
- //
- package_dir = true;
-
l4 ([&]{trace << "directory '" << d << "': "
<< arg_string (pa);});
- // Supporting this would complicate things a bit, but we may
- // add support for it one day.
- //
- if (pa.options.dependency ())
- fail << "package directory '" << d
- << "' may not be built as a dependency";
-
// Fix-up the package version to properly decide if we need to
// upgrade/downgrade the package.
//
if (optional<version> v =
package_iteration (o,
- c,
+ *pdb,
t,
d,
m.name,
m.version,
+ &pvi.info,
true /* check_external */))
m.version = move (*v);
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
m.name,
version_constraint (m.version),
move (pa.options),
@@ -3369,19 +4625,17 @@ namespace bpkg
ap = make_shared<available_package> (move (m));
af = root;
ap->locations.push_back (package_location {root, move (d)});
+
+ existing_packages.push_back (make_pair (ref (*pdb), ap));
+ existing = true;
}
}
catch (const invalid_path&)
{
// Not a valid path so cannot be a package directory.
}
- catch (const failed&)
+ catch (const not_package&)
{
- // If this is a valid package directory but something went wrong
- // afterwards, then we are done.
- //
- if (package_dir)
- throw;
}
}
}
@@ -3395,6 +4649,7 @@ namespace bpkg
//
shared_ptr<selected_package> sp;
bool patch (false);
+ bool deorphan (false);
if (ap == nullptr)
{
@@ -3416,9 +4671,10 @@ namespace bpkg
parse_package_version_constraint (
package,
false /* allow_wildcard */,
- false /* fold_zero_revision */));
+ version::none));
- pa = arg_package (package_scheme::none,
+ pa = arg_package (pdb,
+ package_scheme::none,
move (n),
move (vc),
move (pa.options),
@@ -3429,18 +4685,28 @@ namespace bpkg
if (!pa.options.dependency ())
{
- // Either get the user-specified version or the latest allowed
- // for a source code package. For a system package we pick the
- // latest one just to make sure the package is recognized.
+ assert (pdb != nullptr);
+
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
+ // Get the user-specified version, the latest allowed version,
+ // or the orphan best match for a source code package. For a
+ // system package we will try to find the available package that
+ // matches the user-specified system version (preferable for the
+ // configuration negotiation machinery) and, if fail, fallback
+ // to picking the latest one just to make sure the package is
+ // recognized.
//
optional<version_constraint> c;
+ bool sys (arg_sys (pa));
+
if (!pa.constraint)
{
- assert (!arg_sys (pa));
+ assert (!sys);
if (pa.options.patch () &&
- (sp = db.find<selected_package> (pa.name)) != nullptr)
+ (sp = pdb->find<selected_package> (pa.name)) != nullptr)
{
c = patch_constraint (sp);
@@ -3456,16 +4722,59 @@ namespace bpkg
patch = true;
}
}
- else if (!arg_sys (pa))
+ else if (!sys || !wildcard (*pa.constraint))
c = pa.constraint;
- auto rp (find_available_one (db, pa.name, c, root));
+ if (pa.options.deorphan ())
+ {
+ if (!sys)
+ {
+ if (sp == nullptr)
+ sp = pdb->find<selected_package> (pa.name);
+
+ if (sp != nullptr && orphan_package (*pdb, sp))
+ deorphan = true;
+ }
+
+ // If the package is not an orphan, its version is not
+ // constrained and upgrade/patch is not requested, then just
+ // skip the package.
+ //
+ if (!deorphan &&
+ !pa.constraint &&
+ !pa.options.upgrade () &&
+ !pa.options.patch ())
+ {
+ ++i;
+ continue;
+ }
+ }
+
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> rp (
+ deorphan &&
+ !pa.constraint &&
+ !pa.options.upgrade () &&
+ !pa.options.patch ()
+ ? find_orphan_match (sp, root)
+ : find_available_one (pa.name, c, root));
+
+ if (rp.first == nullptr && sys)
+ {
+ available_packages aps (
+ find_available_all (repo_configs, pa.name));
+
+ if (!aps.empty ())
+ rp = move (aps.front ());
+ }
+
ap = move (rp.first);
af = move (rp.second);
}
}
- catch (const failed&)
+ catch (const failed& e)
{
+ assert (e.code == 1);
diag = true;
continue;
}
@@ -3477,27 +4786,66 @@ namespace bpkg
continue;
// Save (both packages to hold and dependencies) as dependents for
- // recursive upgrade.
+ // recursive upgrade/deorphaning.
//
{
- optional<bool> u;
- optional<bool> r;
+ // Recursive/immediate upgrade/patch.
+ //
+ optional<bool> r; // true -- recursive, false -- immediate.
+ optional<bool> u; // true -- upgrade, false -- patch.
+
+ // Recursive/immediate deorphaning.
+ //
+ optional<bool> d; // true -- recursive, false -- immediate.
const auto& po (pa.options);
- if (po.upgrade_immediate ()) { u = true; r = false; }
- else if (po.upgrade_recursive ()) { u = true; r = true; }
- else if ( po.patch_immediate ()) { u = false; r = false; }
- else if ( po.patch_recursive ()) { u = false; r = true; }
- else if ( po.immediate ()) { u = po.upgrade (); r = false; }
- else if ( po.recursive ()) { u = po.upgrade (); r = true; }
+ // Note that, for example, --upgrade-immediate wins over the
+ // --upgrade --recursive options pair.
+ //
+ if (po.immediate ())
+ {
+ if (po.upgrade () || po.patch ())
+ {
+ r = false;
+ u = po.upgrade ();
+ }
- if (r)
+ if (po.deorphan ())
+ d = false;
+ }
+ else if (po.recursive ())
{
- l4 ([&]{trace << "stashing recursive package "
- << arg_string (pa);});
+ if (po.upgrade () || po.patch ())
+ {
+ r = true;
+ u = po.upgrade ();
+ }
- rec_pkgs.push_back (recursive_package {pa.name, *u, *r});
+ if (po.deorphan ())
+ d = true;
+ }
+
+ if (po.upgrade_immediate ()) { u = true; r = false; }
+ else if (po.upgrade_recursive ()) { u = true; r = true; }
+ else if ( po.patch_immediate ()) { u = false; r = false; }
+ else if ( po.patch_recursive ()) { u = false; r = true; }
+
+ if (po.deorphan_immediate ()) { d = false; }
+ else if (po.deorphan_recursive ()) { d = true; }
+
+ if (r || d)
+ {
+ l4 ([&]{trace << "stash recursive package " << arg_string (pa);});
+
+ // The above options are meaningless for system packages, so we
+ // just ignore them for a system dependency with unspecified
+ // configuration.
+ //
+ if (pdb != nullptr)
+ rec_pkgs.push_back (recursive_package {*pdb, pa.name,
+ r, u && *u,
+ d});
}
}
@@ -3505,46 +4853,83 @@ namespace bpkg
//
if (pa.options.dependency ())
{
- l4 ([&]{trace << "stashing dependency package "
- << arg_string (pa);});
+ l4 ([&]{trace << "stash dependency package " << arg_string (pa);});
bool sys (arg_sys (pa));
- // Make sure that the package is known.
- //
- auto apr (!pa.constraint || sys
- ? find_available (db, pa.name, nullopt)
- : find_available (db, pa.name, *pa.constraint));
+ if (pdb != nullptr)
+ sp = pdb->find<selected_package> (pa.name);
- if (apr.empty ())
+ // Make sure that the package is known. Only allow to unhold an
+ // unknown orphaned selected package (with the view that there is
+ // a good chance it will get dropped; and if not, such an unhold
+ // should be harmless).
+ //
+ if (!existing &&
+ find_available (repo_configs,
+ pa.name,
+ !sys ? pa.constraint : nullopt).empty ())
{
- diag_record dr (fail);
+ // Don't fail if the selected package is held and satisfies the
+ // constraints, if specified. Note that we may still fail later
+ // with the "not available from its dependents' repositories"
+ // error if the dependency is requested to be deorphaned and all
+ // its dependents are orphaned.
+ //
+ if (!(sp != nullptr &&
+ sp->hold_package &&
+ (!pa.constraint || satisfies (sp->version, pa.constraint))))
+ {
+ string n (arg_string (pa, false /* options */));
- dr << "unknown package " << arg_string (pa, false /* options */);
- check_any_available (c, t, &dr);
+ diag_record dr (fail);
+ dr << "unknown package " << n;
+ if (sys)
+ {
+ // Feels like we can't end up here if the version was specified
+ // explicitly.
+ //
+ dr << info << "consider specifying " << n << "/*";
+ }
+ else
+ check_any_available (repo_configs, t, &dr);
+ }
}
- // Save before the name move.
- //
- sp = db.find<selected_package> (pa.name);
+ if (pdb != nullptr)
+ pkg_confs.emplace_back (*pdb, pa.name);
+
+ bool hold_version (pa.constraint.has_value ());
dep_pkgs.push_back (
- dependency_package {move (pa.name),
+ dependency_package {pdb,
+ move (pa.name),
move (pa.constraint),
+ hold_version,
move (sp),
sys,
- pa.options.patch (),
+ existing,
+ (pa.options.upgrade () || pa.options.patch ()
+ ? pa.options.upgrade ()
+ : optional<bool> ()),
+ pa.options.deorphan (),
pa.options.keep_out (),
+ pa.options.disfigure (),
(pa.options.checkout_root_specified ()
? move (pa.options.checkout_root ())
: optional<dir_path> ()),
pa.options.checkout_purge (),
- move (pa.config_vars)});
+ move (pa.config_vars),
+ pa.system_status});
continue;
}
// Add the held package to the list.
//
+ assert (pdb != nullptr);
+
+ lazy_shared_ptr<repository_fragment> root (*pdb, empty_string);
+
// Load the package that may have already been selected (if not done
// yet) and figure out what exactly we need to do here. The end goal
// is the available_package object corresponding to the actual
@@ -3552,15 +4937,17 @@ namespace bpkg
// the same as the selected package).
//
if (sp == nullptr)
- sp = db.find<selected_package> (pa.name);
+ sp = pdb->find<selected_package> (pa.name);
if (sp != nullptr && sp->state == package_state::broken)
- fail << "unable to build broken package " << pa.name <<
+ fail << "unable to build broken package " << pa.name << *pdb <<
info << "use 'pkg-purge --force' to remove";
bool found (true);
bool sys_advise (false);
+ bool sys (arg_sys (pa));
+
// If the package is not available from the repository we can try to
// create it from the orphaned selected package. Meanwhile that
// doesn't make sense for a system package. The only purpose to
@@ -3568,7 +4955,7 @@ namespace bpkg
// package is not in the repository then there is no dependent for it
// (otherwise the repository would be broken).
//
- if (!arg_sys (pa))
+ if (!sys)
{
// If we failed to find the requested package we can still check if
// the package name is present in the repositories and if that's the
@@ -3579,10 +4966,7 @@ namespace bpkg
if (ap == nullptr)
{
if (pa.constraint &&
- find_available_one (db,
- pa.name,
- nullopt,
- root).first != nullptr)
+ find_available_one (pa.name, nullopt, root).first != nullptr)
sys_advise = true;
}
else if (ap->stub ())
@@ -3596,17 +4980,18 @@ namespace bpkg
//
if (pa.constraint)
{
- for (;;)
+ for (;;) // Breakout loop.
{
if (ap != nullptr) // Must be that version, see above.
break;
// Otherwise, our only chance is that the already selected object
- // satisfies the version constraint.
+ // satisfies the version constraint, unless we are deorphaning.
//
- if (sp != nullptr &&
- !sp->system () &&
- satisfies (sp->version, pa.constraint))
+ if (sp != nullptr &&
+ !sp->system () &&
+ satisfies (sp->version, pa.constraint) &&
+ !deorphan)
break; // Derive ap from sp below.
found = false;
@@ -3614,13 +4999,10 @@ namespace bpkg
}
}
//
- // No explicit version was specified by the user (not relevant for a
- // system package, see above).
+ // No explicit version was specified by the user.
//
else
{
- assert (!arg_sys (pa));
-
if (ap != nullptr)
{
assert (!ap->stub ());
@@ -3629,14 +5011,17 @@ namespace bpkg
// we have a newer version, we treat it as an upgrade request;
// otherwise, why specify the package in the first place? We just
// need to check if what we already have is "better" (i.e.,
- // newer).
+ // newer), unless we are deorphaning.
//
- if (sp != nullptr && !sp->system () && ap->version < sp->version)
+ if (sp != nullptr &&
+ !sp->system () &&
+ ap->version < sp->version &&
+ !deorphan)
ap = nullptr; // Derive ap from sp below.
}
else
{
- if (sp == nullptr || sp->system ())
+ if (sp == nullptr || sp->system () || deorphan)
found = false;
// Otherwise, derive ap from sp below.
@@ -3657,15 +5042,30 @@ namespace bpkg
if (!sys_advise)
{
- dr << "unknown package " << pa.name;
+ // Note that if the package is not system and its version was
+ // explicitly specified, then we can only be here if no version of
+ // this package is available in source from the repository
+ // (otherwise we would advise to configure it as a system package;
+ // see above). Thus, let's not print its version constraint in
+ // this case.
+ //
+ // Also note that for a system package we can't end up here if the
+ // version was specified explicitly.
+ //
+ string n (package_string (pa.name, nullopt /* vc */, sys));
+
+ dr << "unknown package " << n;
// Let's help the new user out here a bit.
//
- check_any_available (c, t, &dr);
+ if (sys)
+ dr << info << "consider specifying " << n << "/*";
+ else
+ check_any_available (*pdb, t, &dr);
}
else
{
- assert (!arg_sys (pa));
+ assert (!sys);
dr << arg_string (pa, false /* options */)
<< " is not available in source";
@@ -3682,11 +5082,11 @@ namespace bpkg
//
if (ap == nullptr)
{
- assert (sp != nullptr && sp->system () == arg_sys (pa));
+ assert (sp != nullptr && sp->system () == sys);
- auto rp (make_available (o, c, db, sp));
- ap = rp.first;
- af = rp.second; // Could be NULL (orphan).
+ auto rp (make_available_fragment (o, *pdb, sp));
+ ap = move (rp.first);
+ af = move (rp.second); // Could be NULL (orphan).
}
// We will keep the output directory only if the external package is
@@ -3698,28 +5098,50 @@ namespace bpkg
bool keep_out (pa.options.keep_out () &&
sp != nullptr && sp->external ());
+ bool replace ((existing && sp != nullptr) || deorphan);
+
// Finally add this package to the list.
//
+ optional<bool> upgrade (sp != nullptr &&
+ !pa.constraint &&
+ (pa.options.upgrade () || pa.options.patch ())
+ ? pa.options.upgrade ()
+ : optional<bool> ());
+
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
build_package p {
build_package::build,
+ *pdb,
move (sp),
move (ap),
move (af),
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
true, // Hold package.
pa.constraint.has_value (), // Hold version.
{}, // Constraints.
- arg_sys (pa),
+ sys,
keep_out,
+ pa.options.disfigure (),
+ false, // Configure-only.
(pa.options.checkout_root_specified ()
? move (pa.options.checkout_root ())
: optional<dir_path> ()),
pa.options.checkout_purge (),
move (pa.config_vars),
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ upgrade,
+ deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ replace ? build_package::build_replace : uint16_t (0)};
- l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
+ l4 ([&]{trace << "stash held package "
+ << p.available_name_version_db ();});
// "Fix" the version the user asked for by adding the constraint.
//
@@ -3727,7 +5149,10 @@ namespace bpkg
// this build_package instance is never replaced).
//
if (pa.constraint)
- p.constraints.emplace_back ("command line", move (*pa.constraint));
+ p.constraints.emplace_back (
+ move (*pa.constraint), cmd_line.db, cmd_line.name.string ());
+
+ pkg_confs.emplace_back (p.db, p.name ());
hold_pkgs.push_back (move (p));
}
@@ -3735,88 +5160,143 @@ namespace bpkg
// If this is just pkg-build -u|-p, then we are upgrading all held
// packages.
//
+ // Should we also upgrade the held packages in the explicitly linked
+ // configurations, recursively? Maybe later and we probably will need a
+ // command line option to enable this behavior.
+ //
if (hold_pkgs.empty () && dep_pkgs.empty () &&
- (o.upgrade () || o.patch ()))
+ (o.upgrade () || o.patch () || o.deorphan ()))
{
- using query = query<selected_package>;
-
- for (shared_ptr<selected_package> sp:
- pointer_result (
- db.query<selected_package> (query::state == "configured" &&
- query::hold_package)))
+ for (database& cdb: current_configs)
{
- // Let's skip upgrading system packages as they are, probably,
- // configured as such for a reason.
- //
- if (sp->system ())
- continue;
+ lazy_shared_ptr<repository_fragment> root (cdb, empty_string);
- const package_name& name (sp->name);
+ using query = query<selected_package>;
- optional<version_constraint> pc;
-
- if (o.patch ())
+ for (shared_ptr<selected_package> sp:
+ pointer_result (
+ cdb.query<selected_package> (
+ query::state == "configured" && query::hold_package)))
{
- pc = patch_constraint (sp);
-
- // Skip the non-patchable selected package. Note that the warning
- // have already been issued in this case.
+ // Let's skip upgrading system packages as they are, probably,
+ // configured as such for a reason.
//
- if (!pc)
+ if (sp->system ())
continue;
- }
- auto apr (find_available_one (db, name, pc, root));
+ const package_name& name (sp->name);
- shared_ptr<available_package> ap (move (apr.first));
- if (ap == nullptr || ap->stub ())
- {
- diag_record dr (fail);
- dr << name << " is not available";
+ optional<version_constraint> pc;
- if (ap != nullptr)
- dr << " in source" <<
- info << "consider building it as "
- << package_string (name, version (), true /* system */)
- << " if it is available from the system";
+ if (o.patch ())
+ {
+ pc = patch_constraint (sp);
- // Let's help the new user out here a bit.
+ // Skip the non-patchable selected package. Note that the
+ // warning has already been issued in this case.
+ //
+ if (!pc)
+ continue;
+ }
+
+ bool deorphan (false);
+
+ if (o.deorphan ())
+ {
+ // If the package is not an orphan and upgrade/patch is not
+ // requested, then just skip the package.
+ //
+ if (orphan_package (cdb, sp))
+ deorphan = true;
+ else if (!o.upgrade () && !o.patch ())
+ continue;
+ }
+
+ // In the deorphan mode with no upgrade/patch requested pick the
+ // version that matches the orphan best. Otherwise, pick the patch
+ // or the latest available version, as requested.
//
- check_any_available (c, t, &dr);
- }
+ auto apr (deorphan && !o.upgrade () && !o.patch ()
+ ? find_orphan_match (sp, root)
+ : find_available_one (name, pc, root));
- // We will keep the output directory only if the external package is
- // replaced with an external one (see above for details).
- //
- bool keep_out (o.keep_out () && sp->external ());
+ shared_ptr<available_package> ap (move (apr.first));
+ if (ap == nullptr || ap->stub ())
+ {
+ diag_record dr (fail);
+ dr << name << " is not available";
- build_package p {
- build_package::build,
+ if (ap != nullptr) // Stub?
+ {
+ dr << " in source" <<
+ info << "consider building it as "
+ << package_string (name, version (), true /* system */)
+ << " if it is available from the system";
+ }
+
+ // Let's help the new user out here a bit.
+ //
+ check_any_available (cdb, t, &dr);
+ }
+
+ // We will keep the output directory only if the external package
+ // is replaced with an external one (see above for details).
+ //
+ bool keep_out (o.keep_out () && sp->external ());
+
+ // @@ Pass pa.configure_only() when support for package-specific
+ // --configure-only is added.
+ //
+ build_package p {
+ build_package::build,
+ cdb,
move (sp),
move (ap),
move (apr.second),
- true, // Hold package.
- false, // Hold version.
- {}, // Constraints.
- false, // System package.
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ true, // Hold package.
+ false, // Hold version.
+ {}, // Constraints.
+ false, // System package.
keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
-
- l4 ([&]{trace << "stashing held package "
- << p.available_name_version ();});
-
- hold_pkgs.push_back (move (p));
-
- // If there are also -i|-r, then we are also upgrading dependencies
- // of all held packages.
- //
- if (o.immediate () || o.recursive ())
- rec_pkgs.push_back (
- recursive_package {name, o.upgrade (), o.recursive ()});
+ o.disfigure (),
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ (o.upgrade () || o.patch ()
+ ? o.upgrade ()
+ : optional<bool> ()),
+ deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ deorphan ? build_package::build_replace : uint16_t (0)};
+
+ l4 ([&]{trace << "stash held package "
+ << p.available_name_version_db ();});
+
+ hold_pkgs.push_back (move (p));
+
+ // If there are also -i|-r, then we are also upgrading and/or
+ // deorphaning dependencies of all held packages.
+ //
+ if (o.immediate () || o.recursive ())
+ {
+ rec_pkgs.push_back (recursive_package {
+ cdb, name,
+ (o.upgrade () || o.patch ()
+ ? o.recursive ()
+ : optional<bool> ()),
+ o.upgrade (),
+ (o.deorphan ()
+ ? o.recursive ()
+ : optional<bool> ())});
+ }
+ }
}
}
@@ -3827,14 +5307,59 @@ namespace bpkg
{
assert (rec_pkgs.empty ());
+ if (o.noop_exit_specified ())
+ return o.noop_exit ();
+
info << "nothing to build";
return 0;
}
+ // Search for the package prerequisite among packages specified on the
+ // command line and, if found, return its desired database. Return NULL
+ // otherwise. The `db` argument specifies the dependent database.
+ //
+ // Note that the semantics of a package specified on the command line is:
+ // build the package in the specified configuration (current by default)
+ // and repoint all dependents in the current configuration of this
+ // prerequisite to this new prerequisite. Thus, the function always
+ // returns NULL for dependents not in the current configuration.
+ //
+ // Also note that we rely on "small function object" optimization here.
+ //
+ const function<find_database_function> find_prereq_database (
+ [&pkg_confs] (database& db,
+ const package_name& nm,
+ bool buildtime) -> database*
+ {
+ database* r (nullptr);
+
+ linked_databases ddbs (db.dependency_configs (nm, buildtime));
+
+ for (const package_key& p: pkg_confs)
+ {
+ if (p.name == nm &&
+ find (ddbs.begin (), ddbs.end (), p.db) != ddbs.end ())
+ {
+ if (r == nullptr)
+ r = &p.db.get ();
+ else
+ fail << "multiple " << p.db.get ().type << " configurations "
+ << "specified for package " << nm <<
+ info << r->config_orig <<
+ info << p.db.get ().config_orig;
+ }
+ }
+
+ return r;
+ });
+
// Assemble the list of packages we will need to build-to-hold, still used
// dependencies to up/down-grade, and unused dependencies to drop. We call
// this the plan.
//
+ // Note: for the sake of brevity we also assume the package replacement
+ // wherever we mention the package up/down-grade in this description.
+ //
// The way we do it is tricky: we first create the plan based on build-to-
// holds (i.e., the user selected). Next, to decide whether we need to
 // up/down-grade or drop any dependencies we need to take into account an
@@ -3873,147 +5398,695 @@ namespace bpkg
// grade order where any subsequent entry does not affect the decision of
// the previous ones.
//
+ // Note that we also need to rebuild the plan from scratch on adding a new
+ // up/down-grade/drop if any dependency configuration negotiation has been
+ // performed, since any package replacement may affect the already
+ // negotiated configurations.
+ //
// Package managers are an easy, already solved problem, right?
//
build_packages pkgs;
{
struct dep
{
- package_name name; // Empty if up/down-grade.
+ reference_wrapper<database> db;
+ package_name name; // Empty if up/down-grade.
// Both are NULL if drop.
//
- shared_ptr<available_package> available;
- shared_ptr<bpkg::repository_fragment> repository_fragment;
+ shared_ptr<available_package> available;
+ lazy_shared_ptr<bpkg::repository_fragment> repository_fragment;
- bool system;
+ bool system;
+ bool existing; // Build as an existing archive or directory.
+ optional<bool> upgrade;
+ bool deorphan;
};
vector<dep> deps;
+ existing_dependencies existing_deps;
+ deorphaned_dependencies deorphaned_deps;
+
+ replaced_versions replaced_vers;
+ postponed_dependencies postponed_deps;
+ unacceptable_alternatives unacceptable_alts;
+
+ // Map the repointed dependents to the replacement flags (see
+ // repointed_dependents for details), unless --no-move is specified.
+ //
+ // Note that the overall plan is to add the replacement prerequisites to
+ // the repointed dependents prerequisites sets at the beginning of the
+ // refinement loop iteration and remove them right before the plan
+ // execution simulation. This will allow the collecting/ordering
+ // functions to see both kinds of prerequisites (being replaced and
+ // their replacements) and only consider one kind or another or both, as
+ // appropriate.
+ //
+ repointed_dependents rpt_depts;
+
+ if (!o.no_move ())
+ {
+ transaction t (mdb);
+
+ using query = query<selected_package>;
+
+ query q (query::state == "configured");
+
+ for (database& cdb: current_configs)
+ {
+ for (shared_ptr<selected_package> sp:
+ pointer_result (cdb.query<selected_package> (q)))
+ {
+ map<package_key, bool> ps; // Old/new prerequisites.
+
+ for (const auto& p: sp->prerequisites)
+ {
+ database& db (p.first.database ());
+ const package_name& name (p.first.object_id ());
+
+ // Note that if a prerequisite is in a configuration of the host
+ // type, it is not necessarily a build-time dependency (think of
+ // a dependent from a self-hosted configuration and its runtime
+ // dependency). However, here it doesn't really matter.
+ //
+ database* pdb (
+ find_prereq_database (cdb,
+ name,
+ (db.type == host_config_type ||
+ db.type == build2_config_type)));
+
+ if (pdb != nullptr && *pdb != db && pdb->type == db.type)
+ {
+ ps.emplace (package_key {*pdb, name}, true);
+ ps.emplace (package_key { db, name}, false);
+ }
+ }
+
+ if (!ps.empty ())
+ rpt_depts.emplace (package_key {cdb, sp->name}, move (ps));
+ }
+ }
+
+ t.commit ();
+ }
// Iteratively refine the plan with dependency up/down-grades/drops.
//
- for (bool refine (true), scratch (true); refine; )
+ // Note that we should not clean the deps list on scratch_col (scratch
+ // during the package collection) because we want to enter them before
+ // collect_build_postponed() and they could be the dependents that have
+ // the config clauses. In a sense, change to replaced_vers,
+ // postponed_deps, or unacceptable_alts maps should not affect the deps
+ // list. But not the other way around: a dependency erased from the deps
+ // list could have caused an entry in the replaced_vers, postponed_deps,
+ // and/or unacceptable_alts maps. And so we clean replaced_vers,
+ // postponed_deps, and unacceptable_alts on scratch_exe (scratch during
+ // the plan execution).
+ //
+ for (bool refine (true), scratch_exe (true), scratch_col (false);
+ refine; )
{
- l4 ([&]{trace << "refining execution plan"
+ bool scratch (scratch_exe || scratch_col);
+
+ l4 ([&]{trace << "refine package collection/plan execution"
<< (scratch ? " from scratch" : "");});
- transaction t (db);
+ transaction t (mdb);
- build_packages::postponed_packages postponed;
+ // Collect all configurations where dependency packages can
+ // potentially be built or amended during this run.
+ //
+ linked_databases dep_dbs;
- if (scratch)
+ for (database& cdb: current_configs)
{
- pkgs.clear ();
- postponed.clear ();
-
- // Pre-enter dependencies to keep track of the desired versions and
- // options specified on the command line. In particular, if the
- // version is specified and the dependency is used as part of the
- // plan, then the desired version must be used. We also need it to
- // distinguish user-driven dependency up/down-grades from the
- // dependent-driven ones, not to warn/refuse.
- //
- // Also, if a dependency package already has selected package that
- // is held, then we need to unhold it.
- //
- for (const dependency_package& p: dep_pkgs)
+ for (database& db: cdb.dependency_configs ())
{
- build_package bp {
- nullopt, // Action.
- nullptr, // Selected package.
- nullptr, // Available package/repository frag.
- nullptr,
- false, // Hold package.
- p.constraint.has_value (), // Hold version.
- {}, // Constraints.
- p.system,
- p.keep_out,
- p.checkout_root,
- p.checkout_purge,
- p.config_vars,
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
-
- if (p.constraint)
- bp.constraints.emplace_back ("command line", *p.constraint);
-
- pkgs.enter (p.name, move (bp));
+ if (find (dep_dbs.begin (), dep_dbs.end (), db) == dep_dbs.end ())
+ dep_dbs.push_back (db);
}
+ }
- // Pre-collect user selection to make sure dependency-forced
- // up/down-grades are handled properly (i.e., the order in which we
- // specify packages on the command line does not matter).
- //
- for (const build_package& p: hold_pkgs)
- pkgs.collect_build (o, c, db, p);
+ // Temporarily add the replacement prerequisites to the repointed
+ // dependent prerequisites sets and persist the changes.
+ //
+ for (auto& rd: rpt_depts)
+ {
+ database& db (rd.first.db);
+ const package_name& nm (rd.first.name);
- // Collect all the prerequisites of the user selection.
- //
- for (const build_package& p: hold_pkgs)
- pkgs.collect_build_prerequisites (o, c, db, p.name (), postponed);
+ shared_ptr<selected_package> sp (db.load<selected_package> (nm));
+ package_prerequisites& prereqs (sp->prerequisites);
- // Note that we need to collect unheld after prerequisites, not to
- // overwrite the pre-entered entries before they are used to provide
- // additional constraints for the collected prerequisites.
- //
- for (const dependency_package& p: dep_pkgs)
+ for (const auto& prq: rd.second)
{
- if (p.selected != nullptr && p.selected->hold_package)
- pkgs.collect_unhold (p.selected);
+ if (prq.second) // Prerequisite replacement?
+ {
+ const package_key& p (prq.first);
+
+ // Find the being replaced prerequisite to copy its information
+ // into the replacement.
+ //
+ auto i (find_if (prereqs.begin (), prereqs.end (),
+ [&p] (const auto& pr)
+ {
+ return pr.first.object_id () == p.name;
+ }));
+
+ assert (i != prereqs.end ());
+
+ auto j (prereqs.emplace (
+ lazy_shared_ptr<selected_package> (p.db.get (),
+ p.name),
+ i->second));
+
+ // The selected package should only contain the old
+ // prerequisites at this time, so adding a replacement should
+ // always succeed.
+ //
+ assert (j.second);
+ }
}
- scratch = false;
+ db.update (sp);
}
- else
- pkgs.clear_order (); // Only clear the ordered list.
- // Add to the plan dependencies to up/down-grade/drop that were
- // discovered on the previous iterations.
+ // Erase the replacements from the repointed dependents prerequisite
+ // sets and persist the changes.
//
- for (const dep& d: deps)
+ auto restore_repointed_dependents = [&rpt_depts] ()
{
- if (d.available == nullptr)
- pkgs.collect_drop (db.load<selected_package> (d.name));
- else
+ for (auto& rd: rpt_depts)
{
- shared_ptr<selected_package> sp (
- db.find<selected_package> (d.name));
+ database& db (rd.first.db);
+ const package_name& nm (rd.first.name);
- // We will keep the output directory only if the external package
- // is replaced with an external one (see above for details).
+ shared_ptr<selected_package> sp (db.load<selected_package> (nm));
+
+ for (const auto& prq: rd.second)
+ {
+ if (prq.second) // Prerequisite replacement?
+ {
+ const package_key& p (prq.first);
+
+ size_t n (
+ sp->prerequisites.erase (
+ lazy_shared_ptr<selected_package> (p.db.get (), p.name)));
+
+ // The selected package should always contain the prerequisite
+ // replacement at this time, so its removal should always
+ // succeed.
+ //
+ assert (n == 1);
+ }
+ }
+
+ db.update (sp);
+ }
+ };
+
+ // Pre-enter dependency to keep track of the desired versions and
+ // options specified on the command line. In particular, if the
+ // version is specified and the dependency is used as part of the
+ // plan, then the desired version must be used. We also need it to
+ // distinguish user-driven dependency up/down-grades from the
+ // dependent-driven ones, not to warn/refuse.
+ //
+ // Also, if a dependency package already has selected package that
+ // is held, then we need to unhold it.
+ //
+ auto enter = [&pkgs, &cmd_line] (database& db,
+ const dependency_package& p)
+ {
+ // Note that we don't set the upgrade and deorphan flags based on
+ // the --upgrade, --patch, and --deorphan options since an option
+ // presense doesn't necessarily means that the respective flag needs
+ // to be set (the package may not be selected, may not be patchable
+ // and/or an orphan, etc). The proper flags will be provided by
+ // evaluate_dependency() if/when any upgrade/deorphan recommendation
+ // is given.
+ //
+ build_package bp {
+ nullopt, // Action.
+ db,
+ nullptr, // Selected package.
+ nullptr, // Available package/repo fragment.
+ nullptr,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ false, // Hold package.
+ p.hold_version,
+ {}, // Constraints.
+ p.system,
+ p.keep_out,
+ p.disfigure,
+ false, // Configure-only.
+ p.checkout_root,
+ p.checkout_purge,
+ p.config_vars,
+ nullopt, // Upgrade.
+ false, // Deorphan.
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ 0}; // State flags.
+
+ if (p.constraint)
+ bp.constraints.emplace_back (*p.constraint,
+ cmd_line.db,
+ cmd_line.name.string ());
+
+ pkgs.enter (p.name, move (bp));
+ };
+
+ // Add the system dependency to the database's system repository and
+ // pre-enter it to the build package map.
+ //
+ auto enter_system_dependency = [&add_system_package, &enter]
+ (database& db, const dependency_package& p)
+ {
+ // The system package may only have an exact/wildcard version
+ // specified.
+ //
+ add_system_package (&db,
+ p.name,
+ p.constraint,
+ p.system_status,
+ nullptr /* stubs */);
+ enter (db, p);
+ };
+
+ // Private configurations that were created during collection of the
+ // package builds. The list contains the private configuration paths,
+ // relative to the containing configuration directories (.bpkg/host/,
+ // etc), together with the containing configuration databases.
+ //
+ // Note that the private configurations are linked to their parent
+ // configurations right after being created, so that the subsequent
+ // collecting, ordering, and plan execution simulation logic can use
+ // them. However, we can not easily commit these changes at some
+ // point, since there could also be some other changes made to the
+ // database which needs to be rolled back at the end of the refinement
+ // iteration.
+ //
+ // Thus, the plan is to collect configurations where the private
+ // configurations were created and, after the transaction is rolled
+ // back, re-link these configurations and persist the changes using
+ // the new transaction.
+ //
+ vector<pair<database&, dir_path>> priv_cfgs;
+
+ // Add a newly created private configuration to the private
+ // configurations and the dependency databases lists and pre-enter
+ // builds of system dependencies with unspecified configuration for
+ // this configuration.
+ //
+ const function<build_packages::add_priv_cfg_function> add_priv_cfg (
+ [&priv_cfgs, &dep_dbs, &dep_pkgs, &enter_system_dependency]
+ (database& pdb, dir_path&& cfg)
+ {
+ database& db (pdb.find_attached (pdb.config / cfg,
+ false /* self */));
+
+ priv_cfgs.emplace_back (pdb, move (cfg));
+
+ dep_dbs.push_back (db);
+
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db == nullptr)
+ enter_system_dependency (db, p);
+ }
+ });
+
+ postponed_packages postponed_repo;
+ postponed_packages postponed_alts;
+ postponed_packages postponed_recs;
+ postponed_existing_dependencies postponed_edeps;
+ postponed_configurations postponed_cfgs;
+ strings postponed_cfgs_history;
+ unsatisfied_dependents unsatisfied_depts;
+
+ try
+ {
+ if (scratch)
+ {
+ pkgs.clear ();
+
+ if (scratch_exe)
+ {
+ replaced_vers.clear ();
+ postponed_deps.clear ();
+ unacceptable_alts.clear ();
+
+ scratch_exe = false;
+ }
+ else
+ {
+ assert (scratch_col); // See the scratch definition above.
+
+ // Reset to detect bogus entries.
+ //
+ for (auto& rv: replaced_vers)
+ rv.second.replaced = false;
+
+ for (auto& pd: postponed_deps)
+ {
+ pd.second.wout_config = false;
+ pd.second.with_config = false;
+ }
+
+ scratch_col = false;
+ }
+
+ // Pre-enter dependencies with specified configurations.
//
- bool keep_out (o.keep_out () && sp->external ());
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db != nullptr)
+ enter (*p.db, p);
+ }
- // Marking upgraded dependencies as "required by command line" may
- // seem redundant as they should already be pre-entered as such
- // (see above). But remember dependencies upgraded with -i|-r?
- // Note that the required_by data member should never be empty, as
- // it is used in prompts/diagnostics.
+ // Pre-enter system dependencies with unspecified configuration
+ // for all dependency configurations, excluding those which
+ // already have this dependency pre-entered.
//
- build_package p {
- build_package::build,
- move (sp),
- d.available,
- d.repository_fragment,
- nullopt, // Hold package.
- nullopt, // Hold version.
- {}, // Constraints.
- d.system,
- keep_out,
- nullopt, // Checkout root.
- false, // Checkout purge.
- strings (), // Configuration variables.
- {package_name ()}, // Required by (command line).
- 0}; // Adjustments.
+ for (const dependency_package& p: dep_pkgs)
+ {
+ if (p.db == nullptr)
+ {
+ for (database& db: dep_dbs)
+ {
+ if (!pkgs.entered_build (db, p.name))
+ enter_system_dependency (db, p);
+ }
+ }
+ }
+
+ // Pre-collect user selection to make sure dependency-forced
+ // up/down-grades are handled properly (i.e., the order in which we
+ // specify packages on the command line does not matter).
+ //
+ for (const build_package& p: hold_pkgs)
+ pkgs.collect_build (
+ o, p, replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ // Collect all the prerequisites of the user selection.
+ //
+ // Note that some of the user-selected packages can well be
+ // dependencies whose recursive processing should be postponed.
+ //
+ for (const build_package& p: hold_pkgs)
+ {
+ package_key pk (p.db, p.name ());
+
+ auto i (postponed_deps.find (pk));
+
+ if (i != postponed_deps.end ())
+ {
+ // Even though the user selection may have a configuration, we
+ // treat it as a dependent without any configuration because
+ // it is non-negotiable, known at the outset, and thus cannot
+ // be a reason to postpone anything.
+ //
+ i->second.wout_config = true;
+
+ l5 ([&]{trace << "dep-postpone user-specified " << pk;});
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ l5 ([&]{trace << "dep-postpone user-specified " << pk
+ << " since already in cluster " << *pcfg;});
+ }
+ else
+ {
+ pkgs.collect_build_prerequisites (
+ o,
+ p.db,
+ p.name (),
+ find_prereq_database,
+ add_priv_cfg,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ 0 /* max_alt_index */,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts);
+ }
+ }
+ }
+
+ // Note that we need to collect unheld after prerequisites, not to
+ // overwrite the pre-entered entries before they are used to
+ // provide additional constraints for the collected prerequisites.
+ //
+ for (const dependency_package& p: dep_pkgs)
+ {
+ auto unhold = [&p, &pkgs] (database& db)
+ {
+ shared_ptr<selected_package> sp (
+ p.db != nullptr
+ ? p.selected
+ : db.find<selected_package> (p.name));
- pkgs.collect_build (o, c, db, p, &postponed /* recursively */);
+ if (sp != nullptr && sp->hold_package)
+ pkgs.collect_unhold (db, sp);
+ };
+
+ if (p.db != nullptr)
+ {
+ unhold (*p.db);
+ }
+ else
+ {
+ for (database& db: dep_dbs)
+ unhold (db);
+ }
+ }
+
+ // Collect dependents whose dependencies need to be repointed to
+ // packages from different configurations.
+ //
+ pkgs.collect_repointed_dependents (o,
+ rpt_depts,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ unacceptable_alts,
+ unsatisfied_depts,
+ find_prereq_database,
+ add_priv_cfg);
}
+ else
+ pkgs.clear_order (); // Only clear the ordered list.
+
+ // Add to the plan dependencies to up/down-grade/drop that were
+ // discovered on the previous iterations.
+ //
+ // Note: this loop takes care of both the from-scratch and
+ // refinement cases.
+ //
+ for (const dep& d: deps)
+ {
+ database& ddb (d.db);
+
+ if (d.available == nullptr)
+ {
+ pkgs.collect_drop (o,
+ ddb,
+ ddb.load<selected_package> (d.name),
+ replaced_vers);
+ }
+ else
+ {
+ shared_ptr<selected_package> sp (
+ ddb.find<selected_package> (d.name));
+
+ // We will keep the output directory only if the external package
+ // is replaced with an external one (see above for details).
+ //
+ bool keep_out (o.keep_out () && sp->external ());
+
+ // Marking upgraded dependencies as "required by command line"
+ // may seem redundant as they should already be pre-entered as
+ // such (see above). But remember dependencies upgraded with
+ // -i|-r? Note that the required_by data member should never be
+ // empty, as it is used in prompts/diagnostics.
+ //
+ build_package p {
+ build_package::build,
+ ddb,
+ move (sp),
+ d.available,
+ d.repository_fragment,
+ nullopt, // Dependencies.
+ nullopt, // Dependencies alternatives.
+ nullopt, // Package skeleton.
+ nullopt, // Postponed dependency alternatives.
+ false, // Recursive collection.
+ nullopt, // Hold package.
+ nullopt, // Hold version.
+ {}, // Constraints.
+ d.system,
+ keep_out,
+ o.disfigure (),
+ false, // Configure-only.
+ nullopt, // Checkout root.
+ false, // Checkout purge.
+ strings (), // Configuration variables.
+ d.upgrade,
+ d.deorphan,
+ {cmd_line}, // Required by (command line).
+ false, // Required by dependents.
+ (d.existing || d.deorphan
+ ? build_package::build_replace
+ : uint16_t (0))};
+
+ package_key pk {ddb, d.name};
+
+ // Similar to the user-selected packages, collect non-
+ // recursively the dependencies for which recursive collection
+ // is postponed (see above for details).
+ //
+ auto i (postponed_deps.find (pk));
+ if (i != postponed_deps.end ())
+ {
+ i->second.wout_config = true;
+
+ // Note: not recursive.
+ //
+ pkgs.collect_build (
+ o, move (p), replaced_vers, postponed_cfgs, unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone user-specified dependency "
+ << pk;});
+ }
+ else
+ {
+ const postponed_configuration* pcfg (
+ postponed_cfgs.find_dependency (pk));
+
+ if (pcfg != nullptr)
+ {
+ // Note: not recursive.
+ //
+ pkgs.collect_build (o,
+ move (p),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts);
+
+ l5 ([&]{trace << "dep-postpone user-specified dependency "
+ << pk << " since already in cluster "
+ << *pcfg;});
+ }
+ else
+ {
+ build_package_refs dep_chain;
+
+ // Note: recursive.
+ //
+ pkgs.collect_build (o,
+ move (p),
+ replaced_vers,
+ postponed_cfgs,
+ unsatisfied_depts,
+ &dep_chain,
+ find_prereq_database,
+ add_priv_cfg,
+ &rpt_depts,
+ &postponed_repo,
+ &postponed_alts,
+ &postponed_recs,
+ &postponed_edeps,
+ &postponed_deps,
+ &unacceptable_alts);
+ }
+ }
+ }
+ }
+
+ // Handle the (combined) postponed collection.
+ //
+ if (find_if (postponed_recs.begin (), postponed_recs.end (),
+ [] (const build_package* p)
+ {
+ // Note that we check for the dependencies presence
+ // rather than for the recursive_collection flag
+ // (see collect_build_postponed() for details).
+ //
+ return !p->dependencies;
+ }) != postponed_recs.end () ||
+ !postponed_repo.empty () ||
+ !postponed_alts.empty () ||
+ postponed_deps.has_bogus () ||
+ !postponed_cfgs.empty ())
+ pkgs.collect_build_postponed (o,
+ replaced_vers,
+ postponed_repo,
+ postponed_alts,
+ postponed_recs,
+ postponed_edeps,
+ postponed_deps,
+ postponed_cfgs,
+ postponed_cfgs_history,
+ unacceptable_alts,
+ unsatisfied_depts,
+ find_prereq_database,
+ rpt_depts,
+ add_priv_cfg);
+
+ // Erase the bogus replacements and re-collect from scratch, if any
+ // (see replaced_versions for details).
+ //
+ replaced_vers.cancel_bogus (trace, true /* scratch */);
}
+ catch (const scratch_collection& e)
+ {
+ // Re-collect from scratch (but keep deps).
+ //
+ scratch_col = true;
- // Handle the (combined) postponed collection.
- //
- if (!postponed.empty ())
- pkgs.collect_build_postponed (o, c, db, postponed);
+ l5 ([&]{trace << "collection failed due to " << e.description
+ << (e.package != nullptr
+ ? " (" + e.package->string () + ')'
+ : empty_string)
+ << ", retry from scratch";});
+
+ // Erase the package version replacements that we didn't apply
+ // during the current (re-)collection iteration since the dependents
+ // demanding this version are not collected anymore.
+ //
+ replaced_vers.cancel_bogus (trace, false /* scratch */);
+
+ restore_repointed_dependents ();
+
+ // Commit linking of private configurations that were potentially
+ // created during the collection of the package builds with their
+ // parent configurations.
+ //
+ t.commit ();
+
+ continue;
+ }
+
+ set<package_key> depts (
+ pkgs.collect_dependents (rpt_depts, unsatisfied_depts));
// Now that we have collected all the package versions that we need to
// build, arrange them in the "dependency order", that is, with every
@@ -4025,75 +6098,224 @@ namespace bpkg
// dependencies between the specified packages).
//
// The order of dependency upgrades/downgrades/drops is not really
- // deterministic. We, however, do them before hold_pkgs so that they
- // appear (e.g., on the plan) last.
+ // deterministic. We, however, do upgrades/downgrades before hold_pkgs
+ // so that they appear (e.g., on the plan) after the packages being
+ // built to hold. We handle drops last, though, so that the unused
+ // packages are likely get purged before the package fetches, so that
+ // the disk space they occupy can be reused.
//
for (const dep& d: deps)
- pkgs.order (d.name, false /* reorder */);
+ {
+ if (d.available != nullptr)
+ pkgs.order (d.db,
+ d.name,
+ find_prereq_database,
+ false /* reorder */);
+ }
for (const build_package& p: reverse_iterate (hold_pkgs))
- pkgs.order (p.name ());
+ pkgs.order (p.db, p.name (), find_prereq_database);
+
+ for (const auto& rd: rpt_depts)
+ pkgs.order (rd.first.db,
+ rd.first.name,
+ find_prereq_database,
+ false /* reorder */);
+
+ // Order the existing dependents which have participated in
+ // negotiation of the configuration of their dependencies.
+ //
+ for (const postponed_configuration& cfg: postponed_cfgs)
+ {
+ for (const auto& d: cfg.dependents)
+ {
+ if (d.second.existing)
+ {
+ const package_key& p (d.first);
+ pkgs.order (p.db, p.name, find_prereq_database);
+ }
+ }
+ }
- // Collect and order all the dependents that we will need to
- // reconfigure because of the up/down-grades of packages that are now
- // on the list.
+ // Order the existing dependents whose dependencies are being
+ // up/down-graded or reconfigured.
//
- pkgs.collect_order_dependents (db);
+ for (const package_key& p: depts)
+ pkgs.order (p.db, p.name, find_prereq_database, false /* reorder */);
- // And, finally, make sure all the packages that we need to unhold
- // are on the list.
+ // Order the re-collected packages (deviated dependents, etc).
+ //
+ for (build_package* p: postponed_recs)
+ {
+ assert (p->recursive_collection);
+
+ pkgs.order (p->db, p->name (), find_prereq_database);
+ }
+
+ // Make sure all the packages that we need to unhold are on the list.
//
for (const dependency_package& p: dep_pkgs)
{
- if (p.selected != nullptr && p.selected->hold_package)
- pkgs.order (p.name, false /* reorder */);
+ auto order_unheld = [&p, &pkgs, &find_prereq_database] (database& db)
+ {
+ shared_ptr<selected_package> sp (
+ p.db != nullptr
+ ? p.selected
+ : db.find<selected_package> (p.name));
+
+ if (sp != nullptr && sp->hold_package)
+ pkgs.order (db,
+ p.name,
+ find_prereq_database,
+ false /* reorder */);
+ };
+
+ if (p.db != nullptr)
+ {
+ order_unheld (*p.db);
+ }
+ else
+ {
+ for (database& db: dep_dbs)
+ order_unheld (db);
+ }
}
+ // And, finally, order the package drops.
+ //
+ for (const dep& d: deps)
+ {
+ if (d.available == nullptr)
+ pkgs.order (d.db,
+ d.name,
+ find_prereq_database,
+ false /* reorder */);
+ }
+
+ // Make sure all the postponed dependencies of existing dependents
+ // have been collected and fail if that's not the case.
+ //
+ for (const auto& pd: postponed_edeps)
+ {
+ const build_package* p (pkgs.entered_build (pd.first));
+ assert (p != nullptr && p->available != nullptr);
+
+ if (!p->recursive_collection)
+ {
+ // Feels like this shouldn't happen but who knows.
+ //
+ diag_record dr (fail);
+ dr << "package " << p->available_name_version_db () << " is not "
+ << "built due to its configured dependents deviation in "
+ << "dependency resolution" <<
+ info << "deviated dependents:";
+
+ for (const package_key& d: pd.second)
+ dr << ' ' << d;
+
+ dr << info << "please report in "
+ << "https://github.com/build2/build2/issues/302";
+ }
+ }
+
+#ifndef NDEBUG
+ pkgs.verify_ordering ();
+#endif
+ // Now, as we are done with package builds collecting/ordering, erase
+ // the replacements from the repointed dependents prerequisite sets
+ // and persist the changes.
+ //
+ restore_repointed_dependents ();
+
// We are about to execute the plan on the database (but not on the
// filesystem / actual packages). Save the session state for the
// selected_package objects so that we can restore it later (see
// below for details).
//
using selected_packages = session::object_map<selected_package>;
- auto selected_packages_session = [&db, &ses] () -> selected_packages*
+ auto sp_session = [] (const auto& tm) -> selected_packages*
{
- auto& m (ses.map ()[&db]);
- auto i (m.find (&typeid (selected_package)));
- return (i != m.end ()
+ auto i (tm.find (&typeid (selected_package)));
+ return (i != tm.end ()
? &static_cast<selected_packages&> (*i->second)
: nullptr);
};
- selected_packages old_sp;
- if (const selected_packages* sp = selected_packages_session ())
- old_sp = *sp;
+ map<const odb::database*, selected_packages> old_sp;
+
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
+ old_sp.emplace (dps.first, *sps);
+ }
// Note that we need to perform the execution on the copies of the
// build/drop_package objects to preserve the original ones. The
// selected_package objects will still be changed so we will reload
// them afterwards (see below).
//
+ // After the plan execution simulation, save the packages being built
+ // (selected non-system packages) for the subsequent dependency
+ // hierarchies verification.
+ //
+ bool changed;
+ vector<pair<database&, shared_ptr<selected_package>>> build_pkgs;
{
vector<build_package> tmp (pkgs.begin (), pkgs.end ());
build_package_list bl (tmp.begin (), tmp.end ());
- execute_plan (o, c, db, bl, true /* simulate */);
+ changed = execute_plan (o,
+ bl,
+ &unsatisfied_depts,
+ find_prereq_database);
+
+ if (changed)
+ {
+ for (build_package& p: bl)
+ {
+ shared_ptr<selected_package>& sp (p.selected);
+
+ if (sp != nullptr)
+ {
+ if (!sp->system ())
+ build_pkgs.emplace_back (p.db, move (sp));
+ }
+ else
+ assert (p.action && *p.action == build_package::drop);
+ }
+ }
}
// Return nullopt if no changes to the dependency are necessary. This
// value covers both the "no change is required" and the "no
// recommendation available" cases.
//
- auto eval_dep = [&db, &dep_pkgs, &rec_pkgs] (
- const shared_ptr<selected_package>& sp,
- bool ignore_unsatisfiable = true) -> optional<evaluate_result>
+ auto eval_dep = [&dep_pkgs,
+ &rec_pkgs,
+ &o,
+ &existing_deps,
+ &deorphaned_deps,
+ &pkgs,
+ cache = upgrade_dependencies_cache {}] (
+ database& db,
+ const shared_ptr<selected_package>& sp,
+ bool ignore_unsatisfiable = true) mutable
+ -> optional<evaluate_result>
{
optional<evaluate_result> r;
// See if there is an optional dependency upgrade recommendation.
//
if (!sp->hold_package)
- r = evaluate_dependency (db, dep_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_dependency (o,
+ db,
+ sp,
+ dep_pkgs,
+ o.no_move (),
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable);
// If none, then see for the recursive dependency upgrade
// recommendation.
@@ -4102,7 +6324,15 @@ namespace bpkg
// configured as such for a reason.
//
if (!r && !sp->system () && !rec_pkgs.empty ())
- r = evaluate_recursive (db, rec_pkgs, sp, ignore_unsatisfiable);
+ r = evaluate_recursive (o,
+ db,
+ sp,
+ rec_pkgs,
+ existing_deps,
+ deorphaned_deps,
+ pkgs,
+ ignore_unsatisfiable,
+ cache);
// Translate the "no change" result to nullopt.
//
@@ -4112,16 +6342,18 @@ namespace bpkg
// The empty version means that the package must be dropped.
//
const version ev;
- auto target_version = [&ev] (const shared_ptr<available_package>& ap,
- bool sys) -> const version&
+ auto target_version = [&ev]
+ (database& db,
+ const shared_ptr<available_package>& ap,
+ bool sys) -> const version&
{
if (ap == nullptr)
return ev;
if (sys)
{
- assert (ap->system_version () != nullptr);
- return *ap->system_version ();
+ assert (ap->system_version (db) != nullptr);
+ return *ap->system_version (db);
}
return ap->version;
@@ -4134,15 +6366,18 @@ namespace bpkg
{
bool s (false);
+ database& db (i->db);
+ const package_name& nm (i->name);
+
// Here we scratch if evaluate changed its mind or if the resulting
// version doesn't match what we expect it to be.
//
- if (auto sp = db.find<selected_package> (i->name))
+ if (auto sp = db.find<selected_package> (nm))
{
- const version& dv (target_version (i->available, i->system));
+ const version& dv (target_version (db, i->available, i->system));
- if (optional<evaluate_result> r = eval_dep (sp))
- s = dv != target_version (r->available, r->system) ||
+ if (optional<evaluate_result> r = eval_dep (db, sp))
+ s = dv != target_version (db, r->available, r->system) ||
i->system != r->system;
else
s = dv != sp->version || i->system != sp->system ();
@@ -4152,14 +6387,44 @@ namespace bpkg
if (s)
{
- scratch = true; // Rebuild the plan from scratch.
+ scratch_exe = true; // Rebuild the plan from scratch.
+
+ package_key pk (db, nm);
+
+ auto j (find (existing_deps.begin (), existing_deps.end (), pk));
+ if (j != existing_deps.end ())
+ existing_deps.erase (j);
+
+ deorphaned_deps.erase (pk);
+
i = deps.erase (i);
}
else
++i;
}
- if (!scratch)
+ if (scratch_exe)
+ l5 ([&]{trace << "one of dependency evaluation decisions has "
+ << "changed, re-collecting from scratch";});
+
+ // If the execute_plan() call was noop, there are no user expectations
+ // regarding any dependency, and no upgrade is requested, then the
+ // only possible refinement outcome can be recommendations to drop
+ // unused dependencies (that the user has refused to drop on the
+ // previous build or drop command run). Thus, if the --keep-unused|-K
+ // or --no-refinement option is also specified, then we omit the
+ // need_refinement() call altogether and assume that no refinement is
+ // required.
+ //
+ if (!changed && dep_pkgs.empty () && rec_pkgs.empty ())
+ {
+ assert (!scratch_exe); // No reason to change any previous decision.
+
+ if (o.keep_unused () || o.no_refinement ())
+ refine = false;
+ }
+
+ if (!scratch_exe && refine)
{
// First, we check if the refinement is required, ignoring the
// unsatisfiable dependency version constraints. If we end up
@@ -4169,12 +6434,17 @@ namespace bpkg
// make sure that the unsatisfiable dependency, if left, is
// reported.
//
- auto need_refinement = [&eval_dep, &deps, rec_pkgs, &db, &o] (
- bool diag = false) -> bool
+ auto need_refinement = [&eval_dep,
+ &deps,
+ &rec_pkgs,
+ &dep_dbs,
+ &existing_deps,
+ &deorphaned_deps,
+ &o] (bool diag = false) -> bool
{
// Examine the new dependency set for any up/down-grade/drops.
//
- bool r (false); // Presumably no more refinments are necessary.
+ bool r (false); // Presumably no more refinements are necessary.
using query = query<selected_package>;
@@ -4183,22 +6453,46 @@ namespace bpkg
if (rec_pkgs.empty ())
q = q && !query::hold_package;
- for (shared_ptr<selected_package> sp:
- pointer_result (db.query<selected_package> (q)))
+ // It seems right to only evaluate dependencies in the explicitly
+ // linked configurations, recursively. Indeed, we shouldn't be
+ // up/down-grading or dropping packages in configurations that
+ // only contain dependents, some of which we may only reconfigure.
+ //
+ for (database& db: dep_dbs)
{
- if (optional<evaluate_result> er = eval_dep (sp, !diag))
+ for (shared_ptr<selected_package> sp:
+ pointer_result (db.query<selected_package> (q)))
{
- // Skip unused if we were instructed to keep them.
- //
- if (o.keep_unused () && er->available == nullptr)
- continue;
+ if (optional<evaluate_result> er = eval_dep (db, sp, !diag))
+ {
+ // Skip unused if we were instructed to keep them.
+ //
+ if (o.keep_unused () && er->available == nullptr)
+ continue;
- if (!diag)
- deps.push_back (dep {sp->name,
- move (er->available),
- move (er->repository_fragment),
- er->system});
- r = true;
+ if (!diag)
+ {
+ deps.push_back (dep {er->db,
+ sp->name,
+ move (er->available),
+ move (er->repository_fragment),
+ er->system,
+ er->existing,
+ er->upgrade,
+ er->orphan.has_value ()});
+
+ if (er->existing)
+ existing_deps.emplace_back (er->db, sp->name);
+
+ if (er->orphan)
+ {
+ deorphaned_deps[package_key (er->db, sp->name)] =
+ move (*er->orphan);
+ }
+ }
+
+ r = true;
+ }
}
}
@@ -4207,8 +6501,251 @@ namespace bpkg
refine = need_refinement ();
+ // If no further refinement is necessary, then perform the
+ // diagnostics run. Otherwise, if any dependency configuration
+ // negotiation has been performed during the current plan refinement
+ // iteration, then rebuild the plan from scratch (see above for
+ // details). Also rebuild it from from scratch if any unsatisfied
+ // dependents have been ignored, since their unsatisfied constraints
+ // are now added to the dependencies' build_package::constraints
+ // lists.
+ //
if (!refine)
need_refinement (true /* diag */);
+ else if (!postponed_cfgs.empty () || !unsatisfied_depts.empty ())
+ scratch_exe = true;
+ }
+
+ // Note that we prevent building multiple instances of the same
+ // package dependency in different configurations (of the same type)
+ // while creating the build plan. However, we may potentially end up
+ // with the same dependency in multiple configurations since we do not
+ // descend into prerequisites of already configured packages which
+ // require no up/downgrade.
+ //
+ // To prevent this, we additionally verify that none of the dependency
+ // hierarchies of the packages being built contains the same runtime
+ // dependency, built in multiple configurations.
+ //
+ // Note that we also fail for a system dependency configured in
+ // multiple configurations, since these configurations can potentially
+ // be configured differently and so these system packages can refer to
+ // different targets.
+ //
+ if (changed && !refine)
+ {
+ // Verify the specified package dependency hierarchy and return the
+ // set of packages plus their runtime dependencies, including
+ // indirect ones. Fail if a dependency cycle is detected.
+ //
+ // Also add the result into the `package_prereqs` map, to use it as
+ // a cache and for subsequent additional dependency verification.
+ //
+ // Note that all the encountered dependency sub-hierarchies that
+ // reside in configurations of different types (or beneath them) are
+ // also verified but not included into the resulting set.
+ //
+ using prerequisites = set<lazy_shared_ptr<selected_package>,
+ compare_lazy_ptr_id>;
+
+ map<package_key, prerequisites> package_prereqs;
+ small_vector<config_selected_package, 16> chain;
+
+ auto verify_dependencies = [&package_prereqs, &chain]
+ (database& db,
+ shared_ptr<selected_package> sp,
+ const auto& verify_dependencies)
+ -> const prerequisites&
+ {
+ // Return the cached value, if present.
+ //
+ package_key pk {db, sp->name};
+ {
+ auto i (package_prereqs.find (pk));
+
+ if (i != package_prereqs.end ())
+ return i->second;
+ }
+
+ // Make sure there is no dependency cycle.
+ //
+ config_selected_package csp {db, sp};
+ {
+ auto i (find (chain.begin (), chain.end (), csp));
+
+ if (i != chain.end ())
+ {
+ diag_record dr (fail);
+ dr << "dependency cycle detected involving package " << *sp
+ << db;
+
+ // Note: push_back() can invalidate the iterator.
+ //
+ size_t j (i - chain.begin ());
+
+ for (chain.push_back (csp); j != chain.size () - 1; ++j)
+ dr << info << *chain[j].package << chain[j].db
+ << " depends on "
+ << *chain[j + 1].package << chain[j + 1].db;
+ }
+ }
+
+ chain.push_back (csp);
+
+ // Verify all prerequisites, but only collect those corresponding
+ // to the runtime dependencies.
+ //
+ // Indeed, we don't care if a linked host configuration contains a
+ // configured package that we also have configured in our target
+ // configuration. It's also fine if some of our runtime
+ // dependencies from different configurations build-time depend on
+ // the same package (of potentially different versions) configured
+ // in different host configurations.
+ //
+ // Note, however, that we cannot easily determine if the
+ // prerequisite corresponds to the runtime or build-time
+ // dependency, since we don't store this information for
+ // prerequisites. The current implementation relies on the fact
+ // that the build-time dependency configuration type (host or
+ // build2) differs from the dependent configuration type (target
+ // is a common case) and doesn't work well, for example, for the
+ // self-hosted configurations. For them it can fail erroneously.
+ // We can potentially fix that by additionally storing the
+ // build-time flag for a prerequisite. However, let's first see if
+ // it ever becomes a problem.
+ //
+ prerequisites r;
+ const package_prerequisites& prereqs (sp->prerequisites);
+
+ for (const auto& prereq: prereqs)
+ {
+ const lazy_shared_ptr<selected_package>& p (prereq.first);
+ database& pdb (p.database ());
+
+ // Validate prerequisite sub-hierarchy also in configuration of
+ // different type but do not collect it.
+ //
+ const prerequisites& ps (
+ verify_dependencies (pdb, p.load (), verify_dependencies));
+
+ if (pdb.type != db.type)
+ continue;
+
+ // Collect prerequisite sub-hierarchy, checking that none of the
+ // packages are already collected.
+ //
+ for (const lazy_shared_ptr<selected_package>& p: ps)
+ {
+ // Note: compare_id_lazy_ptr only considers package names.
+ //
+ auto i (r.find (p));
+
+ if (i != r.end ())
+ {
+ database& db1 (p.database ());
+ database& db2 (i->database ());
+
+ if (db1 != db2)
+ {
+ bool indirect (prereqs.find (p) == prereqs.end ());
+
+ fail << "package " << p.object_id ()
+ << (indirect ? " indirectly" : "") << " required by "
+ << *sp << db << " is configured in multiple "
+ << "configurations" <<
+ info << *p.load () << db1 <<
+ info << *i->load () << db2;
+ }
+ }
+ else
+ r.insert (p);
+ }
+ }
+
+ chain.pop_back ();
+
+ // Collect the dependent package itself.
+ //
+ r.insert (lazy_shared_ptr<selected_package> (db, move (sp)));
+
+ // Cache the resulting package prerequisites set and return a
+ // reference to it.
+ //
+ auto j (package_prereqs.emplace (move (pk), move (r)));
+ assert (j.second); // A package cannot depend on itself.
+
+ return j.first->second;
+ };
+
+ for (auto& p: build_pkgs)
+ verify_dependencies (p.first,
+ move (p.second),
+ verify_dependencies);
+
+ // Now, verify that none of the build2 modules may simultaneously be
+ // built in multiple configurations which belong to the same linked
+ // configuration cluster.
+ //
+ // For that we use the `package_prereqs` map: its key set refers to
+ // all the packages potentially involved into the build (explicitly
+ // or implicitly).
+ //
+ {
+ // List of module packages together with the linked configuration
+ // clusters they belong to.
+ //
+ vector<pair<package_key, linked_databases>> build2_mods;
+
+ for (const auto& pp: package_prereqs)
+ {
+ const package_key& pk (pp.first);
+
+ // Skip packages other than the build2 modules.
+ //
+ if (!build2_module (pk.name))
+ continue;
+
+ // Skip build2 modules configured as system.
+ //
+ {
+ shared_ptr<selected_package> sp (
+ pk.db.get ().find<selected_package> (pk.name));
+
+ assert (sp != nullptr);
+
+ if (sp->system ())
+ continue;
+ }
+
+ // Make sure the module's database doesn't belong to any other
+ // cluster this module is also configured in.
+ //
+ for (const auto& m: build2_mods)
+ {
+ if (m.first.name != pk.name)
+ continue;
+
+ // The `package_prereqs` map can only contain the same package
+ // twice if databases differ.
+ //
+ assert (m.first.db != pk.db);
+
+ const linked_databases& lcc (m.second);
+
+ if (find (lcc.begin (), lcc.end (), pk.db) != lcc.end ())
+ {
+ fail << "building build system module " << pk.name
+ << " in multiple configurations" <<
+ info << m.first.db.get ().config_orig <<
+ info << pk.db.get ().config_orig;
+ }
+ }
+
+ // Add the module and its cluster to the list.
+ //
+ build2_mods.emplace_back (pk, pk.db.get ().cluster_configs ());
+ }
+ }
}
// Rollback the changes to the database and reload the changed
@@ -4216,13 +6753,18 @@ namespace bpkg
//
t.rollback ();
{
- transaction t (db);
+ transaction t (mdb);
// First reload all the selected_package object that could have been
// modified (conceptually, we should only modify what's on the
// plan). And in case of drop the object is removed from the session
// so we need to bring it back.
//
+ // Make sure that selected packages are only owned by the session
+ // and the build package list.
+ //
+ build_pkgs.clear ();
+
// Note: we use the original pkgs list since the executed ones may
// contain newly created (but now gone) selected_package objects.
//
@@ -4230,64 +6772,314 @@ namespace bpkg
{
assert (p.action);
+ database& pdb (p.db);
+
if (*p.action == build_package::drop)
{
assert (p.selected != nullptr);
ses.cache_insert<selected_package> (
- db, p.selected->name, p.selected);
+ pdb, p.selected->name, p.selected);
}
if (p.selected != nullptr)
- db.reload (*p.selected);
+ pdb.reload (*p.selected);
}
// Now remove all the newly created selected_package objects from
// the session. The tricky part is to distinguish newly created ones
// from newly loaded (and potentially cached).
//
- if (selected_packages* sp = selected_packages_session ())
+ for (bool rescan (true); rescan; )
{
- for (bool rescan (true); rescan; )
- {
- rescan = false;
+ rescan = false;
- for (auto i (sp->begin ()); i != sp->end (); )
+ for (const auto& dps: ses.map ())
+ {
+ if (selected_packages* sps = sp_session (dps.second))
{
- bool erased (false);
- auto j (old_sp.find (i->first));
-
+ auto j (old_sp.find (dps.first)); // Find the database.
+
+            // Note that if a database has been introduced only during
+            // simulation, then we could just clear all its selected
+            // packages in one shot. Let's, however, be cautious and remove
+            // them iteratively to make sure that none of them are left at
+            // the end (no more rescan is necessary). If any of them is
+            // left, then that would mean that it is referenced from
+            // somewhere besides the session object, which would be a bug.
+ //
if (j == old_sp.end ())
{
- if (i->second.use_count () == 1)
+ if (!sps->empty ())
+ {
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ rescan = true;
+ }
+ else
+ ++i;
+ }
+ }
+
+ continue;
+ }
+
+ const selected_packages& osp (j->second);
+
+ for (auto i (sps->begin ()); i != sps->end (); )
+ {
+ bool erased (false);
+ auto j (osp.find (i->first));
+
+ if (j == osp.end ())
+ {
+ if (i->second.use_count () == 1)
+ {
+ // This might cause another object's use count to drop.
+ //
+ i = sps->erase (i);
+ erased = true;
+ rescan = true;
+ }
+ }
+ // It may also happen that the object was erased from the
+ // database and then recreated. In this case we restore the
+ // pointer that is stored in the session.
+ //
+ else if (i->second != j->second)
{
// This might cause another object's use count to drop.
//
- i = sp->erase (i);
- erased = true;
+ i->second = j->second;
rescan = true;
}
+
+ if (!erased)
+ ++i;
}
- // It may also happen that the object was erased from the
- // database and then recreated. In this case we restore the
- // pointer that is stored in the session.
- //
- else if (i->second != j->second)
+ }
+ }
+
+ // Verify that all the selected packages of the newly introduced
+ // during simulation databases are erased (see above for the
+ // verification reasoning).
+ //
+ if (!rescan)
+ {
+ for (const auto& dps: ses.map ())
+ {
+ if (const selected_packages* sps = sp_session (dps.second))
{
- // This might cause another object's use count to drop.
- //
- i->second = j->second;
- rescan = true;
+ if (old_sp.find (dps.first) == old_sp.end ())
+ assert (sps->empty ());
}
-
- if (!erased)
- ++i;
}
}
}
+ // Re-link the private configurations that were created during the
+ // collection of the package builds with their parent
+ // configurations. Note that these links were lost on the previous
+ // transaction rollback.
+ //
+ for (const pair<database&, dir_path>& pc: priv_cfgs)
+ cfg_link (pc.first,
+ pc.first.config / pc.second,
+ true /* relative */,
+ nullopt /* name */,
+ true /* sys_rep */);
+
t.commit ();
}
+
+ if (!refine)
+ {
+ // Cleanup the package build collecting state, preparing for the
+ // re-collection from the very beginning.
+ //
+ auto prepare_recollect = [&refine,
+ &scratch_exe,
+ &deps,
+ &existing_deps,
+ &deorphaned_deps] ()
+ {
+ refine = true;
+ scratch_exe = true;
+
+ deps.clear ();
+ existing_deps.clear ();
+ deorphaned_deps.clear ();
+ };
+
+ // Issue diagnostics and fail if any existing dependents are not
+ // satisfied with their dependencies.
+ //
+ // But first, try to resolve the first encountered unsatisfied
+ // constraint by replacing the collected unsatisfactory dependency
+ // or some of its dependents with some other available package
+ // version. This version, while not being the best possible choice,
+ // must be satisfactory for all its new and existing dependents. If
+        // must be satisfactory for all its new and existing dependents. If
+        // we succeed, punch the replacement version into the command line
+        // and recollect from the very beginning (see unsatisfied_dependents
+        // for details).
+ if (!unsatisfied_depts.empty ())
+ {
+ if (!cmdline_refine_index) // Not command line adjustments refinement?
+ {
+ const unsatisfied_dependent& dpt (unsatisfied_depts.front ());
+
+ assert (!dpt.ignored_constraints.empty ());
+
+ const ignored_constraint& ic (dpt.ignored_constraints.front ());
+
+ const build_package* p (pkgs.entered_build (ic.dependency));
+ assert (p != nullptr); // The dependency must be collected.
+
+ l5 ([&]{trace << "try to replace unsatisfactory dependency "
+ << p->available_name_version_db () << " with some "
+ << "other version";});
+
+ optional<cmdline_adjustment> a;
+ vector<package_key> unsatisfied_dpts;
+ set<const build_package*> visited_dpts;
+
+ if ((a = try_replace_dependency (o,
+ *p,
+ pkgs,
+ hold_pkgs,
+ dep_pkgs,
+ cmdline_adjs,
+ unsatisfied_dpts,
+ "unsatisfactory dependency")) ||
+ (a = try_replace_dependent (o,
+ *p,
+ &ic.unsatisfied_constraints,
+ pkgs,
+ cmdline_adjs,
+ unsatisfied_dpts,
+ hold_pkgs,
+ dep_pkgs,
+ visited_dpts)) ||
+ !cmdline_adjs.empty ())
+ {
+ if (a)
+ {
+ cmdline_adjs.push (move (*a));
+ }
+ else
+ {
+ cmdline_adjustment a (cmdline_adjs.pop ());
+
+ l5 ([&]{trace << "cannot replace any package, rolling back "
+ << "latest command line adjustment ("
+ << cmdline_adjs.to_string (a) << ')';});
+ }
+
+ prepare_recollect ();
+ }
+ else
+ unsatisfied_depts.diag (pkgs); // Issue the diagnostics and fail.
+ }
+ else // We are in the command line adjustments refinement cycle.
+ {
+ // Since we have failed to collect, then the currently dropped
+ // command line adjustment is essential. Thus, push it back to
+ // the stack, drop the next one, and retry. If this is the last
+ // adjustment in the stack, then we assume that no further
+ // refinement is possible and we just recollect, assuming that
+ // this recollection will be successful.
+ //
+ assert (cmdline_refine_adjustment); // Wouldn't be here otherwise.
+
+ l5 ([&]{trace << "attempt to refine command line adjustments by "
+ << "rolling back adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << " failed, pushing it back";});
+
+ cmdline_adjs.push (move (*cmdline_refine_adjustment));
+
+          // Index of the previously dropped adjustment must be
+          // valid.
+ //
+ assert (*cmdline_refine_index != cmdline_adjs.size ());
+
+ if (++(*cmdline_refine_index) != cmdline_adjs.size ())
+ {
+ cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */);
+
+ l5 ([&]{trace << "continue with command line adjustments "
+ << "refinement cycle by rolling back adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment);});
+ }
+ else
+ {
+ cmdline_refine_adjustment = nullopt;
+
+ l5 ([&]{trace << "cannot further refine command line "
+ << "adjustments, performing final collection";});
+ }
+
+ prepare_recollect ();
+ }
+ }
+ //
+ // If the collection was successful, then see if we still need to
+ // perform the command line adjustments refinement.
+ //
+ else if (cmdline_adjs.tried () &&
+ (!cmdline_refine_index ||
+ *cmdline_refine_index != cmdline_adjs.size ()))
+ {
+ // If some command line adjustment is currently being dropped,
+ // that means that this adjustment is redundant.
+ //
+ bool initial (!cmdline_refine_index);
+
+ if (!initial)
+ {
+ assert (cmdline_refine_adjustment);
+
+ l5 ([&]{trace << "command line adjustment "
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << " is redundant, dropping it";});
+
+ cmdline_refine_adjustment = nullopt;
+ cmdline_refine_index = nullopt;
+ }
+
+ // We cannot remove all the adjustments during the refinement.
+ // Otherwise, we shouldn't be failing in the first place.
+ //
+ assert (!cmdline_adjs.empty ());
+
+ // If there is just a single adjustment left, then there is
+ // nothing to refine anymore.
+ //
+ if (cmdline_adjs.size () != 1)
+ {
+ cmdline_refine_adjustment = cmdline_adjs.pop (true /* front */);
+ cmdline_refine_index = 0;
+
+ l5 ([&]{trace << (initial ? "start" : "re-start") << " command "
+ << "line adjustments refinement cycle by rolling "
+ << "back first adjustment ("
+ << cmdline_adjs.to_string (
+ *cmdline_refine_adjustment)
+ << ')';});
+
+ prepare_recollect ();
+ }
+ }
+ }
}
}
@@ -4295,129 +7087,319 @@ namespace bpkg
// While at it, detect if we have any dependents that the user may want to
// update.
//
+ // For the packages being printed also print the configuration specified
+ // by the user, dependents, and via the reflect clauses. For that we will
+ // use the package skeletons, initializing them if required. Note that for
+ // a system package the skeleton may already be initialized during the
+ // dependency negotiation process. Also note that the freshly-initialized
+ // skeletons will be reused during the plan execution.
+ //
bool update_dependents (false);
// We need the plan and to ask for the user's confirmation only if some
- // implicit action (such as building prerequisite or reconfiguring
- // dependent package) is to be taken or there is a selected package which
- // version must be changed. But if the user explicitly requested it with
- // --plan, then we print it as long as it is not empty.
+ // implicit action (such as building prerequisite, reconfiguring dependent
+ // package, or installing system/distribution packages) is to be taken or
+ // there is a selected package which version must be changed. But if the
+ // user explicitly requested it with --plan, then we print it as long as
+ // it is not empty.
//
string plan;
+ sha256 csum;
bool need_prompt (false);
- if (o.print_only () || !o.yes () || o.plan_specified ())
+ if (!o.yes () ||
+ o.print_only () ||
+ o.plan_specified () ||
+ o.rebuild_checksum_specified ())
{
- bool first (true); // First entry in the plan.
+ // Map the main system/distribution packages that need to be installed
+ // to the system packages which caused their installation (see
+ // build_package::system_install() for details).
+ //
+ using package_names = vector<reference_wrapper<const package_name>>;
+ using system_map = map<string, package_names>;
+
+ system_map sys_map;
+ // Iterate in the reverse order as we will do for printing the action
+ // lines. This way a sys-install action line will be printed right
+ // before the bpkg action line of a package which appears first in the
+ // sys-install action's 'required by' list.
+ //
for (const build_package& p: reverse_iterate (pkgs))
{
- const shared_ptr<selected_package>& sp (p.selected);
+ if (const system_package_status* s = p.system_install ())
+ {
+ package_names& ps (sys_map[s->system_name]);
- string act;
+ if (find (ps.begin (), ps.end (), p.name ()) == ps.end ())
+ ps.push_back (p.name ());
+ }
+ }
+
+ // Start the transaction since we may query available packages for
+ // skeleton initializations.
+ //
+ transaction t (mdb);
+ bool first (true); // First entry in the plan.
+
+ // Print the bpkg package action lines.
+ //
+ // Also print the sys-install action lines for system/distribution
+ // packages which require installation by the system package manager.
+ // Print them before the respective system package action lines, but
+ // only once per (main) system/distribution package. For example:
+ //
+ // sys-install libssl1.1/1.1.1l (required by sys:libssl, sys:libcrypto)
+ // configure sys:libssl/1.1.1 (required by foo)
+ // configure sys:libcrypto/1.1.1 (required by bar)
+ //
+ for (auto i (pkgs.rbegin ()); i != pkgs.rend (); )
+ {
+ build_package& p (*i);
assert (p.action);
- if (*p.action == build_package::drop)
+ string act;
+
+ const system_package_status* s;
+ system_map::iterator j;
+
+ if ((s = p.system_install ()) != nullptr &&
+ (j = sys_map.find (s->system_name)) != sys_map.end ())
{
- act = "drop " + sp->string () + " (unused)";
+ act = "sys-install ";
+ act += s->system_name;
+ act += '/';
+ act += s->system_version;
+ act += " (required by ";
+
+ bool first (true);
+ for (const package_name& n: j->second)
+ {
+ if (first)
+ first = false;
+ else
+ act += ", ";
+
+ act += "sys:";
+ act += n.string ();
+ }
+
+ act += ')';
+
need_prompt = true;
+
+ // Make sure that we print this sys-install action just once.
+ //
+ sys_map.erase (j);
+
+ // Note that we don't increment i in order to re-iterate this pkgs
+ // entry.
}
else
{
- string cause;
- if (*p.action == build_package::adjust)
- {
- assert (sp != nullptr && (p.reconfigure () || p.unhold ()));
+ ++i;
- // This is a dependent needing reconfiguration.
- //
- // This is an implicit reconfiguration which requires the plan to
- // be printed. Will flag that later when composing the list of
- // prerequisites.
- //
- if (p.reconfigure ())
- {
- act = "reconfigure";
- cause = "dependent of";
-
- if (!o.configure_only ())
- update_dependents = true;
- }
-
- // This is a held package needing unhold.
- //
- if (p.unhold ())
- {
- if (act.empty ())
- act = "unhold";
- else
- act += "/unhold";
- }
+ database& pdb (p.db);
+ const shared_ptr<selected_package>& sp (p.selected);
- act += ' ' + sp->name.string ();
+ if (*p.action == build_package::drop)
+ {
+ act = "drop " + sp->string (pdb) + " (unused)";
+ need_prompt = true;
}
else
{
- // Even if we already have this package selected, we have to
- // make sure it is configured and updated.
+ // Print configuration variables.
//
- if (sp == nullptr)
- act = p.system ? "configure" : "new";
- else if (sp->version == p.available_version ())
+ // The idea here is to only print configuration for those packages
+ // for which we call pkg_configure*() in execute_plan().
+ //
+ package_skeleton* cfg (nullptr);
+
+ string cause;
+ if (*p.action == build_package::adjust)
{
- // If this package is already configured and is not part of the
- // user selection (or we are only configuring), then there is
- // nothing we will be explicitly doing with it (it might still
- // get updated indirectly as part of the user selection update).
+ assert (sp != nullptr && (p.reconfigure () || p.unhold ()));
+
+ // This is a dependent needing reconfiguration.
//
- if (!p.reconfigure () &&
- sp->state == package_state::configured &&
- (!p.user_selection () || o.configure_only ()))
- continue;
+ // This is an implicit reconfiguration which requires the plan
+ // to be printed. Will flag that later when composing the list
+ // of prerequisites.
+ //
+ if (p.reconfigure ())
+ {
+ act = "reconfigure";
+ cause = "dependent of";
- act = p.system
- ? "reconfigure"
- : (p.reconfigure ()
- ? (o.configure_only ()
- ? "reconfigure"
- : "reconfigure/update")
- : "update");
+ if (!o.configure_only ())
+ update_dependents = true;
+ }
+
+ // This is a held package needing unhold.
+ //
+ if (p.unhold ())
+ {
+ if (act.empty ())
+ act = "unhold";
+ else
+ act += "/unhold";
+ }
+
+ act += ' ' + sp->name.string ();
+
+ const string& s (pdb.string);
+ if (!s.empty ())
+ act += ' ' + s;
+
+ // This is an adjustment and so there is no available package
+ // specified for the build package object and thus the skeleton
+ // cannot be present.
+ //
+ assert (p.available == nullptr && !p.skeleton);
+
+ // We shouldn't be printing configurations for plain unholds.
+ //
+ if (p.reconfigure ())
+ {
+ // Since there is no available package specified we need to
+ // find it (or create a transient one).
+ //
+ cfg = &p.init_skeleton (o,
+ true /* load_old_dependent_config */,
+ find_available (o, pdb, sp));
+ }
}
else
{
- act = p.system
- ? "reconfigure"
- : sp->version < p.available_version ()
- ? "upgrade"
- : "downgrade";
+ assert (p.available != nullptr); // This is a package build.
- need_prompt = true;
- }
+ bool replace (p.replace ());
- if (p.unhold ())
- act += "/unhold";
+ // Even if we already have this package selected, we have to
+ // make sure it is configured and updated.
+ //
+ if (sp == nullptr)
+ {
+ act = p.system ? "configure" : "new";
- act += ' ' + p.available_name_version ();
- cause = "required by";
- }
+ // For a new non-system package the skeleton must already be
+ // initialized.
+ //
+ assert (p.system || p.skeleton.has_value ());
- string rb;
- if (!p.user_selection ())
- {
- for (const package_name& n: p.required_by)
- rb += ' ' + n.string ();
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+ }
+ else if (sp->version == p.available_version ())
+ {
+ // If this package is already configured and is not part of
+ // the user selection (or we are only configuring), then there
+ // is nothing we will be explicitly doing with it (it might
+ // still get updated indirectly as part of the user selection
+ // update).
+ //
+ if (!p.reconfigure () &&
+ sp->state == package_state::configured &&
+ (!p.user_selection () ||
+ o.configure_only () ||
+ p.configure_only ()))
+ continue;
+
+ act = p.system
+ ? "reconfigure"
+ : (p.reconfigure ()
+ ? (o.configure_only () || p.configure_only ()
+ ? (replace ? "replace" : "reconfigure")
+ : (replace ? "replace/update" : "reconfigure/update"))
+ : "update");
- // If not user-selected, then there should be another (implicit)
- // reason for the action.
+ if (p.reconfigure ())
+ {
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+ }
+ }
+ else
+ {
+ act += p.system
+ ? "reconfigure"
+ : (sp->version < p.available_version ()
+ ? (replace ? "replace/upgrade" : "upgrade")
+ : (replace ? "replace/downgrade" : "downgrade"));
+
+ // For a non-system package up/downgrade the skeleton must
+ // already be initialized.
+ //
+ assert (p.system || p.skeleton.has_value ());
+
+ // Initialize the skeleton if it is not initialized yet.
+ //
+ cfg = &(p.skeleton ? *p.skeleton : p.init_skeleton (o));
+
+ need_prompt = true;
+ }
+
+ if (p.unhold ())
+ act += "/unhold";
+
+ act += ' ' + p.available_name_version_db ();
+ cause = p.required_by_dependents ? "required by" : "dependent of";
+
+ if (p.configure_only ())
+ update_dependents = true;
+ }
+
+ // Also list dependents for the newly built user-selected
+ // dependencies.
//
- assert (!rb.empty ());
+ bool us (p.user_selection ());
+ string rb;
+ if (!us || (!p.user_selection (hold_pkgs) && sp == nullptr))
+ {
+ // Note: if we are ever tempted to truncate this, watch out for
+ // the --rebuild-checksum functionality which uses this. But
+ // then it's not clear this information is actually important:
+ // can a dependent-dependency structure change without any of
+ // the package versions changing? Doesn't feel like it should.
+ //
+ for (const package_version_key& pvk: p.required_by)
+ {
+ // Skip the command-line, etc dependents and don't print the
+ // package version (which is not always available; see
+ // build_package::required_by for details).
+ //
+ if (pvk.version) // Is it a real package?
+ {
+ rb += (rb.empty () ? " " : ", ") +
+ pvk.string (true /* ignore_version */);
+ }
+ }
- need_prompt = true;
- }
+ // If not user-selected, then there should be another (implicit)
+ // reason for the action.
+ //
+ assert (!rb.empty ());
+ }
+
+ if (!rb.empty ())
+ act += " (" + cause + rb + ')';
- if (!rb.empty ())
- act += " (" + cause + rb + ')';
+ if (cfg != nullptr && !cfg->empty_print ())
+ {
+ ostringstream os;
+ cfg->print_config (os, o.print_only () ? " " : " ");
+ act += '\n';
+ act += os.str ();
+ }
+
+ if (!us)
+ need_prompt = true;
+ }
}
if (first)
@@ -4441,7 +7423,20 @@ namespace bpkg
// Print indented for better visual separation.
//
plan += (plan.empty () ? " " : "\n ") + act;
+
+ if (o.rebuild_checksum_specified ())
+ csum.append (act);
}
+
+ t.commit ();
+ }
+
+ if (o.rebuild_checksum_specified ())
+ {
+ cout << csum.string () << endl;
+
+ if (o.rebuild_checksum () == csum.string ())
+ return o.noop_exit_specified () ? o.noop_exit () : 0;
}
if (o.print_only ())
@@ -4466,13 +7461,14 @@ namespace bpkg
// Ok, we have "all systems go". The overall action plan is as follows.
//
- // 1. disfigure up/down-graded, reconfigured [left to right]
- // 2. purge up/down-graded [right to left]
- // 3.a fetch/unpack new, up/down-graded
- // 3.b checkout new, up/down-graded
- // 4. configure all
- // 5. unhold unheld
- // 6. build user selection [right to left]
+ // 1. sys-install not installed system/distribution
+ // 2. disfigure up/down-graded, reconfigured [left to right]
+ // 3. purge up/down-graded [right to left]
+ // 4.a fetch/unpack new, up/down-graded, replaced
+ // 4.b checkout new, up/down-graded, replaced
+ // 5. configure all
+ // 6. unhold unheld
+ // 7. build user selection [right to left]
//
// Note that for some actions, e.g., purge or fetch, the order is not
// really important. We will, however, do it right to left since that
@@ -4486,10 +7482,13 @@ namespace bpkg
// prerequsites got upgraded/downgraded and that the user may want to in
// addition update (that update_dependents flag above).
//
- execute_plan (o, c, db, pkgs, false /* simulate */);
+ bool noop (!execute_plan (o,
+ pkgs,
+ nullptr /* simulate */,
+ find_prereq_database));
if (o.configure_only ())
- return 0;
+ return noop && o.noop_exit_specified () ? o.noop_exit () : 0;
// update
//
@@ -4500,18 +7499,25 @@ namespace bpkg
// First add the user selection.
//
+ // Only update user-selected packages which are specified on the command
+ // line as build to hold. Note that the dependency package will be updated
+ // implicitly via their dependents, if the latter are updated.
+ //
for (const build_package& p: reverse_iterate (pkgs))
{
assert (p.action);
- if (*p.action != build_package::build)
+ if (*p.action != build_package::build || p.configure_only ())
continue;
+ database& db (p.db);
const shared_ptr<selected_package>& sp (p.selected);
if (!sp->system () && // System package doesn't need update.
- p.user_selection ())
- upkgs.push_back (pkg_command_vars {sp,
+ p.user_selection (hold_pkgs))
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ !multi_config () && db.main (),
+ sp,
strings () /* vars */,
false /* cwd */});
}
@@ -4525,97 +7531,243 @@ namespace bpkg
{
assert (p.action);
- if (*p.action == build_package::adjust && p.reconfigure ())
- upkgs.push_back (pkg_command_vars {p.selected,
+ database& db (p.db);
+
+ // Note: don't update the re-evaluated and re-collected dependents
+ // unless they are reconfigured.
+ //
+ if ((*p.action == build_package::adjust && p.reconfigure ()) ||
+ (*p.action == build_package::build &&
+ ((p.flags & build_package::build_repoint) != 0 ||
+ ((p.flags & (build_package::build_reevaluate |
+ build_package::build_recollect)) != 0 &&
+ p.reconfigure ()))))
+ upkgs.push_back (pkg_command_vars {db.config_orig,
+ !multi_config () && db.main (),
+ p.selected,
strings () /* vars */,
false /* cwd */});
}
}
- pkg_update (c, o, o.for_ (), strings (), upkgs);
+ pkg_update (o, o.for_ (), strings (), upkgs);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: upkgs)
- text << "updated " << *pv.pkg;
+ text << "updated " << pv.string ();
}
return 0;
}
- static void
+ static bool
execute_plan (const pkg_build_options& o,
- const dir_path& c,
- database& db,
build_package_list& build_pkgs,
- bool simulate)
+ unsatisfied_dependents* simulate,
+ const function<find_database_function>& fdb)
{
tracer trace ("execute_plan");
l4 ([&]{trace << "simulate: " << (simulate ? "yes" : "no");});
- uint16_t verbose (!simulate ? verb : 0);
+ // If unsatisfied dependents are specified then we are in the simulation
+ // mode and thus simulate can be used as bool.
+
+ bool r (false);
+ uint16_t verb (!simulate ? bpkg::verb : 0);
+
+ bool result (verb && !o.no_result ());
+ bool progress (!result &&
+ ((verb == 1 && !o.no_progress () && stderr_term) ||
+ o.progress ()));
+
+ size_t prog_i, prog_n, prog_percent;
+
+ // sys-install
+ //
+ // Install the system/distribution packages required by the respective
+ // system packages (see build_package::system_install() for details).
+ //
+ if (!simulate && o.sys_install ())
+ {
+ // Collect the names of all the system packages being managed by the
+ // system package manager (as opposed to user/fallback), suppressing
+ // duplicates.
+ //
+ vector<package_name> ps;
+
+ for (build_package& p: build_pkgs)
+ {
+ if (p.system_status () &&
+ find (ps.begin (), ps.end (), p.name ()) == ps.end ())
+ {
+ ps.push_back (p.name ());
+ }
+ }
+
+ // Install the system/distribution packages.
+ //
+ if (!ps.empty ())
+ {
+ // Otherwise, we wouldn't get any package statuses.
+ //
+ assert (sys_pkg_mgr && *sys_pkg_mgr != nullptr);
+
+ (*sys_pkg_mgr)->install (ps);
+ }
+ }
// disfigure
//
- for (build_package& p: build_pkgs)
+ // Note: similar code in pkg-drop.
+ //
+ auto disfigure_pred = [] (const build_package& p)
{
// We are only interested in configured packages that are either being
// up/down-graded, need reconfiguration (e.g., dependents), or dropped.
//
+ if (*p.action != build_package::drop && !p.reconfigure ())
+ return false;
+
+ return true;
+ };
+
+ if (progress)
+ {
+ prog_i = 0;
+ prog_n = static_cast<size_t> (count_if (build_pkgs.begin (),
+ build_pkgs.end (),
+ disfigure_pred));
+ prog_percent = 100;
+ }
+
+ // On the package reconfiguration we will try to resolve dependencies to
+ // the same prerequisites (see pkg_configure() for details). For that, we
+ // will save prerequisites before disfiguring a package. Note, though,
+ // that this is not required for the recursively collected packages since
+ // the dependency alternatives are already selected for them.
+ //
+ map<const build_package*, vector<package_name>> previous_prerequisites;
+
+ for (build_package& p: build_pkgs)
+ {
assert (p.action);
- if (*p.action != build_package::drop && !p.reconfigure ())
+ if (!disfigure_pred (p))
continue;
+ database& pdb (p.db);
shared_ptr<selected_package>& sp (p.selected);
+ assert (sp != nullptr); // Shouldn't be here otherwise.
+
// Each package is disfigured in its own transaction, so that we
// always leave the configuration in a valid state.
//
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
- // Reset the flag if the package being unpacked is not an external one.
+ // Figure out if an external package is being replaced with another
+ // external.
//
- if (p.keep_out && !simulate)
+ bool external (false);
+ if (!simulate)
{
- const shared_ptr<available_package>& ap (p.available);
- const package_location& pl (ap->locations[0]);
+ external = (sp->external () && p.external ());
- if (pl.repository_fragment.object_id () == "") // Special root.
- p.keep_out = !exists (pl.location); // Directory case.
- else
- {
+ // Reset the keep_out flag if the package being unpacked is not
+ // external.
+ //
+ if (p.keep_out && !external)
p.keep_out = false;
+ }
- // See if the package comes from the directory-based repository, and
- // so is external.
- //
- // Note that such repository fragments are always preferred over
- // others (see below).
+ // Save prerequisites before disfiguring the package.
+ //
+ // Note that we add the prerequisites list to the map regardless if
+ // there are any prerequisites or not to, in particular, indicate the
+ // package reconfiguration mode to the subsequent
+ // pkg_configure_prerequisites() call (see the function documentation
+ // for details).
+ //
+ if (*p.action != build_package::drop && !p.dependencies && !p.system)
+ {
+ vector<package_name>& ps (previous_prerequisites[&p]);
+
+ if (!sp->prerequisites.empty ())
+ {
+ ps.reserve (sp->prerequisites.size ());
+
+ for (const auto& pp: sp->prerequisites)
+ ps.push_back (pp.first.object_id ());
+ }
+ }
+
+ // For an external package being replaced with another external, keep
+ // the configuration unless requested not to with --disfigure.
+ //
+ bool disfigure (p.disfigure || !external);
+
+ // If the skeleton was not initialized yet (this is an existing package
+ // reconfiguration and no configuration was printed as a part of the
+ // plan, etc), then initialize it now. Whether the skeleton is newly
+ // initialized or not, make sure that the current configuration is
+ // loaded, unless the package project is not being disfigured.
+ //
+ if (*p.action != build_package::drop && !p.system)
+ {
+ if (!p.skeleton)
+ {
+ // If there is no available package specified for the build package
+ // object, then we need to find it (or create a transient one).
//
- for (const package_location& l: ap->locations)
- {
- if (l.repository_fragment.load ()->location.directory_based ())
- {
- p.keep_out = true;
- break;
- }
- }
+ p.init_skeleton (o,
+ true /* load_old_dependent_config */,
+ (p.available == nullptr
+ ? find_available (o, pdb, sp)
+ : nullptr));
}
+
+ if (disfigure)
+ p.skeleton->load_old_config ();
}
// Commits the transaction.
//
- pkg_disfigure (c, o, t, sp, !p.keep_out, simulate);
+ pkg_disfigure (o, pdb, t,
+ sp,
+ !p.keep_out /* clean */,
+ disfigure,
+ simulate);
+
+ r = true;
assert (sp->state == package_state::unpacked ||
sp->state == package_state::transient);
- if (verbose && !o.no_result ())
- text << (sp->state == package_state::transient
- ? "purged "
- : "disfigured ") << *sp;
+ if (result || progress)
+ {
+ const char* what (sp->state == package_state::transient
+ ? "purged"
+ : "disfigured");
+ if (result)
+ text << what << ' ' << *sp << pdb;
+ else if (progress)
+ {
+ size_t p ((++prog_i * 100) / prog_n);
+
+ if (prog_percent != p)
+ {
+ prog_percent = p;
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += "% of packages ";
+ diag_progress += what;
+ }
+ }
+ }
// Selected system package is now gone from the database. Before we drop
// the object we need to make sure the hold state is preserved in the
@@ -4633,14 +7785,26 @@ namespace bpkg
}
}
+ // Clear the progress if shown.
+ //
+ if (progress)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
+ }
+
// purge, fetch/unpack|checkout
//
+ pkg_checkout_cache checkout_cache (o);
for (build_package& p: reverse_iterate (build_pkgs))
{
assert (p.action);
+ database& pdb (p.db);
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
+ const lazy_shared_ptr<repository_fragment>& af (p.repository_fragment);
// Purge the dropped or system package, fetch/unpack or checkout the
// other one.
@@ -4654,11 +7818,15 @@ namespace bpkg
//
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ assert (!sp->system ());
- if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
+
+ if (result)
+ text << "purged " << *sp << pdb;
sp = nullptr;
}
@@ -4683,11 +7851,13 @@ namespace bpkg
{
if (sp != nullptr && !sp->system ())
{
- transaction t (db, !simulate /* start */);
- pkg_purge (c, t, sp, simulate); // Commits the transaction.
+ transaction t (pdb, !simulate /* start */);
+ pkg_purge (pdb, t, sp, simulate); // Commits the transaction.
+
+ r = true;
- if (verbose && !o.no_result ())
- text << "purged " << *sp;
+ if (result)
+ text << "purged " << *sp << pdb;
if (!p.hold_package)
p.hold_package = sp->hold_package;
@@ -4702,9 +7872,11 @@ namespace bpkg
}
// Fetch or checkout if this is a new package or if we are
- // up/down-grading.
+ // up/down-grading or replacing.
//
- if (sp == nullptr || sp->version != p.available_version ())
+ if (sp == nullptr ||
+ sp->version != p.available_version () ||
+ p.replace ())
{
sp = nullptr; // For the directory case below.
@@ -4714,7 +7886,7 @@ namespace bpkg
if (pl.repository_fragment.object_id () != "") // Special root?
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Go through package repository fragments to decide if we should
// fetch, checkout or unpack depending on the available repository
@@ -4726,19 +7898,22 @@ namespace bpkg
for (const package_location& l: ap->locations)
{
- const repository_location& rl (
- l.repository_fragment.load ()->location);
-
- if (!basis || rl.local ()) // First or local?
+ if (!rep_masked_fragment (l.repository_fragment))
{
- basis = rl.basis ();
+ const repository_location& rl (
+ l.repository_fragment.load ()->location);
- if (rl.directory_based ())
- break;
+ if (!basis || rl.local ()) // First or local?
+ {
+ basis = rl.basis ();
+
+ if (rl.directory_based ())
+ break;
+ }
}
}
- assert (basis);
+ assert (basis); // Shouldn't be here otherwise.
// All calls commit the transaction.
//
@@ -4747,7 +7922,8 @@ namespace bpkg
case repository_basis::archive:
{
sp = pkg_fetch (o,
- c,
+ pdb,
+ af.database (),
t,
ap->id.name,
p.available_version (),
@@ -4758,28 +7934,33 @@ namespace bpkg
case repository_basis::version_control:
{
sp = p.checkout_root
- ? pkg_checkout (o,
- c,
- t,
- ap->id.name,
- p.available_version (),
- *p.checkout_root,
- true /* replace */,
- p.checkout_purge,
- simulate)
- : pkg_checkout (o,
- c,
- t,
- ap->id.name,
- p.available_version (),
- true /* replace */,
- simulate);
+ ? pkg_checkout (checkout_cache,
+ o,
+ pdb,
+ af.database (),
+ t,
+ ap->id.name,
+ p.available_version (),
+ *p.checkout_root,
+ true /* replace */,
+ p.checkout_purge,
+ simulate)
+ : pkg_checkout (checkout_cache,
+ o,
+ pdb,
+ af.database (),
+ t,
+ ap->id.name,
+ p.available_version (),
+ true /* replace */,
+ simulate);
break;
}
case repository_basis::directory:
{
sp = pkg_unpack (o,
- c,
+ pdb,
+ af.database (),
t,
ap->id.name,
p.available_version (),
@@ -4793,11 +7974,11 @@ namespace bpkg
//
else if (exists (pl.location))
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_fetch (
o,
- c,
+ pdb,
t,
pl.location, // Archive path.
true, // Replace
@@ -4807,10 +7988,12 @@ namespace bpkg
if (sp != nullptr) // Actually fetched or checked out something?
{
+ r = true;
+
assert (sp->state == package_state::fetched ||
sp->state == package_state::unpacked);
- if (verbose && !o.no_result ())
+ if (result)
{
const repository_location& rl (sp->repository_fragment);
@@ -4826,19 +8009,19 @@ namespace bpkg
case repository_basis::archive:
{
assert (sp->state == package_state::fetched);
- dr << "fetched " << *sp;
+ dr << "fetched " << *sp << pdb;
break;
}
case repository_basis::directory:
{
assert (sp->state == package_state::unpacked);
- dr << "using " << *sp << " (external)";
+ dr << "using " << *sp << pdb << " (external)";
break;
}
case repository_basis::version_control:
{
assert (sp->state == package_state::unpacked);
- dr << "checked out " << *sp;
+ dr << "checked out " << *sp << pdb;
break;
}
}
@@ -4853,98 +8036,489 @@ namespace bpkg
{
if (sp != nullptr)
{
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
// Commits the transaction.
//
- sp = pkg_unpack (o, c, t, ap->id.name, simulate);
+ sp = pkg_unpack (o, pdb, t, ap->id.name, simulate);
- if (verbose && !o.no_result ())
- text << "unpacked " << *sp;
+ if (result)
+ text << "unpacked " << *sp << pdb;
}
else
{
const package_location& pl (ap->locations[0]);
assert (pl.repository_fragment.object_id () == ""); // Special root.
- transaction t (db, !simulate /* start */);
+ transaction t (pdb, !simulate /* start */);
sp = pkg_unpack (o,
- c,
+ pdb,
t,
path_cast<dir_path> (pl.location),
true, // Replace.
false, // Don't purge; commits the transaction.
simulate);
- if (verbose && !o.no_result ())
- text << "using " << *sp << " (external)";
+ if (result)
+ text << "using " << *sp << pdb << " (external)";
}
+ r = true;
+
assert (sp->state == package_state::unpacked);
}
break; // Get out from the breakout loop.
}
}
+ checkout_cache.clear (); // Detect errors.
// configure
//
+ auto configure_pred = [] (const build_package& p)
+ {
+ // Skip package drops.
+ //
+ if (*p.action == build_package::drop)
+ return false;
+
+ // We configure everything that isn't already configured.
+ //
+ if (p.selected != nullptr &&
+ p.selected->state == package_state::configured)
+ return false;
+
+ return true;
+ };
+
+ // On the first pass collect all the build_package's to be configured and
+ // calculate their configure_prerequisites_result's.
+ //
+ struct configure_package
+ {
+ reference_wrapper<build_package> pkg;
+
+ // These are unused for system packages.
+ //
+ configure_prerequisites_result res;
+ build2::variable_overrides ovrs;
+ };
+ vector<configure_package> configure_packages;
+ configure_packages.reserve (build_pkgs.size ());
+
+ // While at it also collect global configuration variable overrides from
+ // each configure_prerequisites_result::config_variables and merge them
+ // into configure_global_vars.
+ //
+ // @@ TODO: Note that the current global override semantics is quite
+ // broken in that we don't force reconfiguration of all the packages.
+ //
+#ifndef BPKG_OUTPROC_CONFIGURE
+ strings configure_global_vars;
+#endif
+
+ // Return the "would be" state of packages that would be configured
+ // by this stage.
+ //
+ function<find_package_state_function> configured_state (
+ [&configure_packages] (const shared_ptr<selected_package>& sp)
+ -> optional<pair<package_state, package_substate>>
+ {
+ for (const configure_package& cp: configure_packages)
+ {
+ const build_package& p (cp.pkg);
+
+ if (p.selected == sp)
+ return make_pair (
+ package_state::configured,
+ p.system ? package_substate::system : package_substate::none);
+ }
+
+ return nullopt;
+ });
+
for (build_package& p: reverse_iterate (build_pkgs))
{
assert (p.action);
+ if (!configure_pred (p))
+ continue;
+
shared_ptr<selected_package>& sp (p.selected);
const shared_ptr<available_package>& ap (p.available);
- if (*p.action == build_package::drop) // Skip package drops.
- continue;
-
- // Configure the package.
+ // Collect the package.
//
// At this stage the package is either selected, in which case it's a
// source code one, or just available, in which case it is a system
// one. Note that a system package gets selected as being configured.
//
+ // NOTE: remember to update the preparation of the plan to be presented
+ // to the user if changing anything here.
+ //
assert (sp != nullptr || p.system);
- // We configure everything that isn't already configured.
+ database& pdb (p.db);
+ transaction t (pdb, !simulate /* start */);
+
+ // Show how we got here if things go wrong, for example selecting a
+ // prerequisite is ambiguous due to the dependency package being
+ // configured in multiple linked configurations.
//
- if (sp != nullptr && sp->state == package_state::configured)
- continue;
+ auto g (
+ make_exception_guard (
+ [&p] ()
+ {
+ info << "while configuring " << p.name () << p.db;
+ }));
- transaction t (db, !simulate /* start */);
+ configure_prerequisites_result cpr;
+ if (p.system)
+ {
+ // We have no choice but to configure system packages on the first
+ // pass since otherwise there will be no selected package for
+ // pkg_configure_prerequisites() to find. Luckily they have no
+ // dependencies and so can be configured in any order. We will print
+ // their progress/result on the second pass in the proper order.
+ //
+ // Note: commits the transaction.
+ //
+ sp = pkg_configure_system (ap->id.name,
+ p.available_version (),
+ pdb,
+ t);
+ }
+ else
+ {
+ // Should only be called for packages whose prerequisites are saved.
+ //
+ auto prereqs = [&p, &previous_prerequisites] ()
+ {
+ auto i (previous_prerequisites.find (&p));
+ assert (i != previous_prerequisites.end ());
+ return &i->second;
+ };
+
+ // In the simulation mode unconstrain all the unsatisfactory
+ // dependencies, if any, while configuring the dependent (see
+ // build_packages::collect_dependents() for details).
+ //
+ // Note: must be called at most once.
+ //
+ auto unconstrain_deps = [simulate,
+ &p,
+ &trace,
+ deps = vector<package_key> ()] () mutable
+ {
+ if (simulate)
+ {
+ unsatisfied_dependent* ud (
+ simulate->find_dependent (package_key (p.db, p.name ())));
+
+ if (ud != nullptr)
+ {
+ assert (deps.empty ());
+
+ deps.reserve (ud->ignored_constraints.size ());
+
+ for (const auto& c: ud->ignored_constraints)
+ {
+ l5 ([&]{trace << "while configuring dependent " << p.name ()
+ << p.db << " in simulation mode unconstrain ("
+ << c.dependency << ' ' << c.constraint << ')';});
+
+ deps.emplace_back (c.dependency);
+ }
+ }
+ }
+
+ return !deps.empty () ? &deps : nullptr;
+ };
+
+ if (ap != nullptr)
+ {
+ assert (*p.action == build_package::build);
+
+ // If the package prerequisites builds are collected, then use the
+ // resulting package skeleton and the pre-selected dependency
+ // alternatives.
+ //
+ // Note that we may not collect the package prerequisites builds if
+ // the package is already configured but we still need to
+ // reconfigure it due, for example, to an upgrade of its dependency.
+ // In this case we pass to pkg_configure() the newly created package
+ // skeleton which contains the package configuration variables
+ // specified on the command line but (naturally) no reflection
+ // configuration variables. Note, however, that in this case
+ // pkg_configure() call will evaluate the reflect clauses itself and
+ // so the proper reflection variables will still end up in the
+ // package configuration.
+ //
+ // @@ Note that if we ever allow the user to override the
+ // alternative selection, this will break (and also if the user
+ // re-configures the package manually). Maybe that's a good reason
+ // not to allow this? Or we could store this information in the
+ // database.
+ //
+ if (p.dependencies)
+ {
+ assert (p.skeleton);
+
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ *p.dependencies,
+ &*p.alternatives,
+ move (*p.skeleton),
+ nullptr /* prev_prerequisites */,
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
+ else
+ {
+ assert (p.skeleton); // Must be initialized before disfiguring.
+
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ ap->dependencies,
+ nullptr /* alternatives */,
+ move (*p.skeleton),
+ prereqs (),
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
+ }
+ else // Existing dependent.
+ {
+ // This is an adjustment of a dependent which cannot be system
+ // (otherwise it wouldn't be a dependent) and cannot become system
+ // (otherwise it would be a build).
+ //
+ assert (*p.action == build_package::adjust && !sp->system ());
+
+ // Must be in the unpacked state since it was disfigured on the
+ // first pass (see above).
+ //
+ assert (sp->state == package_state::unpacked);
+
+ // The skeleton must be initialized before disfiguring and the
+ // package can't be system.
+ //
+ assert (p.skeleton && p.skeleton->available != nullptr);
+
+ const dependencies& deps (p.skeleton->available->dependencies);
+
+ // @@ Note that on reconfiguration the dependent loses the
+ // potential configuration variables specified by the user on
+ // some previous build, which can be quite surprising. Should we
+ // store this information in the database?
+ //
+ // Note: this now works for external packages via package
+ // skeleton (which extracts user configuration).
+ //
+ cpr = pkg_configure_prerequisites (o,
+ pdb,
+ t,
+ deps,
+ nullptr /* alternatives */,
+ move (*p.skeleton),
+ prereqs (),
+ simulate,
+ fdb,
+ configured_state,
+ unconstrain_deps ());
+ }
- // Note that pkg_configure() commits the transaction.
+ t.commit ();
+
+ if (verb >= 5 && !simulate && !cpr.config_variables.empty ())
+ {
+ diag_record dr (trace);
+
+ dr << sp->name << pdb << " configuration variables:";
+
+ for (const string& cv: cpr.config_variables)
+ dr << "\n " << cv;
+ }
+
+ if (!simulate)
+ {
+#ifndef BPKG_OUTPROC_CONFIGURE
+ auto& gvs (configure_global_vars);
+
+ // Note that we keep global overrides in cpr.config_variables for
+ // diagnostics and skip them in var_override_function below.
+ //
+ for (const string& v: cpr.config_variables)
+ {
+ // Each package should have exactly the same set of global
+ // overrides by construction since we don't allow package-
+ // specific global overrides.
+ //
+ if (v[0] == '!')
+ {
+ if (find (gvs.begin (), gvs.end (), v) == gvs.end ())
+ gvs.push_back (v);
+ }
+ }
+#endif
+ // Add config.config.disfigure unless already disfigured (see the
+ // high-level pkg_configure() version for background).
+ //
+ if (ap == nullptr || !p.disfigure)
+ {
+ cpr.config_variables.push_back (
+ "config.config.disfigure='config." + sp->name.variable () + "**'");
+ }
+ }
+ }
+
+ configure_packages.push_back (configure_package {p, move (cpr), {}});
+ }
+
+ // Reuse the build state to avoid reloading the dependencies over and over
+ // again. This is a valid optimization since we are configuring in the
+ // dependency-dependent order.
+ //
+ unique_ptr<build2::context> configure_ctx;
+
+#ifndef BPKG_OUTPROC_CONFIGURE
+ if (!simulate)
+ {
+ using build2::context;
+ using build2::variable_override;
+
+ function<context::var_override_function> vof (
+ [&configure_packages] (context& ctx, size_t& i)
+ {
+ for (configure_package& cp: configure_packages)
+ {
+ for (const string& v: cp.res.config_variables)
+ {
+ if (v[0] == '!') // Skip global overrides (see above).
+ continue;
+
+ pair<char, variable_override> p (
+ ctx.parse_variable_override (v, i++, false /* buildspec */));
+
+ variable_override& vo (p.second);
+
+ // @@ TODO: put absolute scope overrides into global_vars.
+ //
+ assert (!(p.first == '!' || (vo.dir && vo.dir->absolute ())));
+
+ cp.ovrs.push_back (move (vo));
+ }
+ }
+ });
+
+ configure_ctx = pkg_configure_context (
+ o, move (configure_global_vars), vof);
+
+ // Only global in configure_global_vars.
//
- if (p.system)
- sp = pkg_configure_system (ap->id.name, p.available_version (), t);
- else if (ap != nullptr)
- pkg_configure (c, o, t, sp, ap->dependencies, p.config_vars, simulate);
- else // Dependent.
+ assert (configure_ctx->var_overrides.empty ());
+ }
+#endif
+
+ if (progress)
+ {
+ prog_i = 0;
+ prog_n = configure_packages.size ();
+ prog_percent = 100;
+ }
+
+ for (configure_package& cp: configure_packages)
+ {
+ build_package& p (cp.pkg);
+
+ const shared_ptr<selected_package>& sp (p.selected);
+
+ // Configure the package (system already configured).
+ //
+ // NOTE: remember to update the preparation of the plan to be presented
+ // to the user if changing anything here.
+ //
+ database& pdb (p.db);
+
+ if (!p.system)
{
- // Must be in the unpacked state since it was disfigured on the first
- // pass (see above).
+ const shared_ptr<available_package>& ap (p.available);
+
+ transaction t (pdb, !simulate /* start */);
+
+ // Show how we got here if things go wrong.
//
- assert (sp->state == package_state::unpacked);
-
- package_manifest m (
- pkg_verify (sp->effective_src_root (c),
- true /* ignore_unknown */,
- [&sp] (version& v) {v = sp->version;}));
-
- pkg_configure (c,
- o,
- t,
- sp,
- convert (move (m.dependencies)),
- p.config_vars,
- simulate);
+ auto g (
+ make_exception_guard (
+ [&p] ()
+ {
+ info << "while configuring " << p.name () << p.db;
+ }));
+
+ // Note that pkg_configure() commits the transaction.
+ //
+ if (ap != nullptr)
+ {
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ move (cp.res),
+ configure_ctx,
+ cp.ovrs,
+ simulate);
+ }
+ else // Dependent.
+ {
+ pkg_configure (o,
+ pdb,
+ t,
+ sp,
+ move (cp.res),
+ configure_ctx,
+ cp.ovrs,
+ simulate);
+ }
}
+ r = true;
+
assert (sp->state == package_state::configured);
- if (verbose && !o.no_result ())
- text << "configured " << *sp;
+ if (result)
+ text << "configured " << *sp << pdb;
+ else if (progress)
+ {
+ size_t p ((++prog_i * 100) / prog_n);
+
+ if (prog_percent != p)
+ {
+ prog_percent = p;
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += "% of packages configured";
+ }
+ }
+ }
+
+#ifndef BPKG_OUTPROC_CONFIGURE
+ configure_ctx.reset (); // Free.
+#endif
+
+ // Clear the progress if shown.
+ //
+ if (progress)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
}
// Update the hold state.
@@ -4959,6 +8533,8 @@ namespace bpkg
if (*p.action == build_package::drop)
continue;
+ database& pdb (p.db);
+
const shared_ptr<selected_package>& sp (p.selected);
assert (sp != nullptr);
@@ -4979,19 +8555,23 @@ namespace bpkg
sp->hold_package = hp;
sp->hold_version = hv;
- transaction t (db, !simulate /* start */);
- db.update (sp);
+ transaction t (pdb, !simulate /* start */);
+ pdb.update (sp);
t.commit ();
- if (verbose > 1)
+ r = true;
+
+ if (verb > 1)
{
if (hp)
- text << "holding package " << sp->name;
+ text << "holding package " << sp->name << pdb;
if (hv)
- text << "holding version " << *sp;
+ text << "holding version " << *sp << pdb;
}
}
}
+
+ return r;
}
}
diff --git a/bpkg/pkg-checkout.cxx b/bpkg/pkg-checkout.cxx
index 3b99496..81efdc2 100644
--- a/bpkg/pkg-checkout.cxx
+++ b/bpkg/pkg-checkout.cxx
@@ -10,23 +10,26 @@
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
#include <bpkg/checksum.hxx>
+#include <bpkg/rep-mask.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/manifest-utility.hxx>
#include <bpkg/pkg-purge.hxx>
#include <bpkg/pkg-verify.hxx>
-#include <bpkg/pkg-configure.hxx>
using namespace std;
using namespace butl;
namespace bpkg
{
+ // pkg_checkout()
+ //
static void
checkout (const common_options& o,
const repository_location& rl,
const dir_path& dir,
- const shared_ptr<available_package>& ap)
+ const shared_ptr<available_package>& ap,
+ database& db)
{
switch (rl.type ())
{
@@ -41,9 +44,9 @@ namespace bpkg
// Print the progress indicator to attribute the possible fetching
// progress.
//
- if (verb && !o.no_progress ())
+ if ((verb && !o.no_progress ()) || o.progress ())
text << "checking out "
- << package_string (ap->id.name, ap->version);
+ << package_string (ap->id.name, ap->version) << db;
git_checkout_submodules (o, rl, dir);
}
@@ -58,19 +61,23 @@ namespace bpkg
// For some platforms/repository types the working tree needs to be
// temporarily "fixed up" for the build2 operations to work properly on it.
//
- static bool
+ static optional<bool>
fixup (const common_options& o,
const repository_location& rl,
const dir_path& dir,
- bool revert = false)
+ bool revert = false,
+ bool ie = false)
{
- bool r (false);
+ optional<bool> r;
switch (rl.type ())
{
case repository_type::git:
{
- r = git_fixup_worktree (o, dir, revert);
+ if (!revert && !ie)
+ git_verify_symlinks (o, dir);
+
+ r = git_fixup_worktree (o, dir, revert, ie);
break;
}
case repository_type::pkg:
@@ -83,8 +90,10 @@ namespace bpkg
// Return the selected package object which may replace the existing one.
//
static shared_ptr<selected_package>
- pkg_checkout (const common_options& o,
- dir_path c,
+ pkg_checkout (pkg_checkout_cache& cache,
+ const common_options& o,
+ database& pdb,
+ database& rdb,
transaction& t,
package_name n,
version v,
@@ -95,12 +104,13 @@ namespace bpkg
{
tracer trace ("pkg_checkout");
- database& db (t.database ());
- tracer_guard tg (db, trace);
+ tracer_guard tg (pdb, trace); // NOTE: sets tracer for the whole cluster.
+
+ const dir_path& c (pdb.config_orig);
// See if this package already exists in this configuration.
//
- shared_ptr<selected_package> p (db.find<selected_package> (n));
+ shared_ptr<selected_package> p (pdb.find<selected_package> (n));
if (p != nullptr)
{
@@ -121,13 +131,13 @@ namespace bpkg
}
}
- check_any_available (c, t);
+ check_any_available (rdb, t);
- // Note that here we compare including the revision (see pkg-fetch()
+ // Note that here we compare including the revision (see pkg_fetch()
// implementation for more details).
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ rdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -139,14 +149,17 @@ namespace bpkg
for (const package_location& l: ap->locations)
{
- const repository_location& rl (l.repository_fragment.load ()->location);
-
- if (rl.version_control_based () && (pl == nullptr || rl.local ()))
+ if (!rep_masked_fragment (l.repository_fragment))
{
- pl = &l;
+ const repository_location& rl (l.repository_fragment.load ()->location);
+
+ if (rl.version_control_based () && (pl == nullptr || rl.local ()))
+ {
+ pl = &l;
- if (rl.local ())
- break;
+ if (rl.local ())
+ break;
+ }
}
}
@@ -161,8 +174,6 @@ namespace bpkg
const repository_location& rl (pl->repository_fragment->location);
auto_rmdir rmd;
- optional<string> mc;
-
const dir_path& ord (output_root ? *output_root : c);
dir_path d (ord / dir_path (n.string () + '-' + v.string ()));
@@ -173,10 +184,7 @@ namespace bpkg
// (or interruption) the user will need to run bpkg-rep-fetch to restore
// the missing repository.
//
- bool fs_changed (false);
-
if (!simulate)
- try
{
if (exists (d))
fail << "package directory " << d << " already exists";
@@ -185,45 +193,84 @@ namespace bpkg
// if the previous checkout has failed or been interrupted.
//
dir_path sd (repository_state (rl));
- dir_path rd (c / repos_dir / sd);
+ dir_path rd (rdb.config_orig / repos_dir / sd);
- if (!exists (rd))
- fail << "missing repository directory for package " << n << " " << v
- << " in configuration " << c <<
- info << "run 'bpkg rep-fetch' to repair";
+ // Use the temporary directory from the repository information source
+ // configuration, so that we can always move the repository into and out
+ // of it (note that if they appear on different filesystems that won't
+ // be possible).
+ //
+ auto ti (tmp_dirs.find (rdb.config_orig));
+ assert (ti != tmp_dirs.end ());
+ const dir_path& tdir (ti->second);
- // The repository temporary directory.
+ // Try to reuse the cached repository (moved to the temporary directory
+ // with some fragment checked out and fixed up).
//
- auto_rmdir rmt (temp_dir / sd);
- const dir_path& td (rmt.path);
+ pkg_checkout_cache::state_map& cm (cache.map_);
+ auto i (cm.find (rd));
+
+ if (i == cm.end () || i->second.rl.fragment () != rl.fragment ())
+ {
+ // Restore the repository if some different fragment is checked out.
+ //
+ if (i != cm.end ())
+ cache.erase (i);
+
+ // Checkout and cache the fragment.
+ //
+ if (!exists (rd))
+ fail << "missing repository directory for package " << n << " " << v
+ << " in its repository information configuration "
+ << rdb.config_orig <<
+ info << "run 'bpkg rep-fetch' to repair";
+
+ // The repository temporary directory.
+ //
+ auto_rmdir rmt (tdir / sd, !keep_tmp);
+
+ // Move the repository to the temporary directory.
+ //
+ {
+ const dir_path& td (rmt.path);
- if (exists (td))
- rm_r (td);
+ if (exists (td))
+ rm_r (td);
+
+ mv (rd, td);
+ }
+
+ // Pre-insert the incomplete repository entry into the cache and
+ // "finalize" it by setting the fixed up value later, after the
+ // repository fragment checkout succeeds. Until then the repository
+ // may not be restored in its permanent place.
+ //
+ using state = pkg_checkout_cache::state;
+
+ i = cm.emplace (rd, state {move (rmt), rl, nullopt}).first;
+
+ // Checkout the repository fragment and fix up the working tree.
+ //
+ state& s (i->second);
+ const dir_path& td (s.rmt.path);
+
+ checkout (o, rl, td, ap, pdb);
+ s.fixedup = fixup (o, rl, td);
+ }
// The temporary out of source directory that is required for the dist
// meta-operation.
//
- auto_rmdir rmo (temp_dir / dir_path (n.string ()));
+ auto_rmdir rmo (tdir / dir_path (n.string ()), !keep_tmp);
const dir_path& od (rmo.path);
if (exists (od))
rm_r (od);
- // Finally, move the repository to the temporary directory and proceed
- // with the checkout.
- //
- mv (rd, td);
- fs_changed = true;
-
- // Checkout the repository fragment and fix up the working tree.
- //
- checkout (o, rl, td, ap);
- bool fixedup (fixup (o, rl, td));
-
// Calculate the package path that points into the checked out fragment
// directory.
//
- dir_path pd (td / path_cast<dir_path> (pl->location));
+ dir_path pd (i->second.rmt.path / path_cast<dir_path> (pl->location));
// Form the buildspec.
//
@@ -254,45 +301,18 @@ namespace bpkg
// of our dependencies.
//
- // At verbosity level 1 we want our (nicer) progress header but the
- // build system's actual progress.
+ // If the verbosity level is less than 2, then we want our (nicer)
+ // progress header but the build system's actual progress.
//
- if (verb == 1 && !o.no_progress ())
- text << "distributing " << n << '/' << v;
+ if ((verb == 1 && !o.no_progress ()) || (verb == 0 && o.progress ()))
+ text << "distributing " << n << '/' << v << pdb;
run_b (o,
verb_b::progress,
"--no-external-modules",
"!config.dist.bootstrap=true",
- "config.dist.root='" + ord.representation () + "'",
+ "config.dist.root='" + ord.representation () + '\'',
bspec);
-
- // Revert the fix-ups.
- //
- if (fixedup)
- fixup (o, rl, td, true /* revert */);
-
- // Manipulations over the repository are now complete, so we can return
- // it to its permanent location.
- //
- mv (td, rd);
- fs_changed = false;
-
- rmt.cancel ();
-
- mc = sha256 (o, d / manifest_file);
- }
- catch (const failed&)
- {
- if (fs_changed)
- {
- // We assume that the diagnostics has already been issued.
- //
- warn << "repository state is now broken" <<
- info << "run 'bpkg rep-fetch' to repair";
- }
-
- throw;
}
if (p != nullptr)
@@ -301,7 +321,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things go
// badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (pdb, t, p, simulate);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -310,31 +330,34 @@ namespace bpkg
//
if (p->name.string () != n.string ())
{
- db.erase (p);
+ pdb.erase (p);
p = nullptr;
}
}
- // Make the package and configuration paths absolute and normalized.
- // If the package is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the package path absolute and normalized. If the package is inside
+ // the configuration, use the relative path. This way we can move the
+ // configuration around.
//
- normalize (c, "configuration");
normalize (d, "package");
- if (d.sub (c))
- d = d.leaf (c);
+ if (d.sub (pdb.config))
+ d = d.leaf (pdb.config);
if (p != nullptr)
{
+ // Note: we can be replacing an external package and thus we reset the
+ // manifest/subprojects and buildfiles checksums.
+ //
p->version = move (v);
p->state = package_state::unpacked;
p->repository_fragment = rl;
p->src_root = move (d);
p->purge_src = purge;
- p->manifest_checksum = move (mc);
+ p->manifest_checksum = nullopt;
+ p->buildfiles_checksum = nullopt;
- db.update (p);
+ pdb.update (p);
}
else
{
@@ -352,11 +375,12 @@ namespace bpkg
false,
move (d), // Source root.
purge, // Purge directory.
- move (mc),
+ nullopt, // No manifest/subprojects checksum.
+ nullopt, // No buildfiles checksum.
nullopt, // No output directory yet.
{}}); // No prerequisites captured yet.
- db.persist (p);
+ pdb.persist (p);
}
t.commit ();
@@ -366,8 +390,10 @@ namespace bpkg
}
shared_ptr<selected_package>
- pkg_checkout (const common_options& o,
- const dir_path& c,
+ pkg_checkout (pkg_checkout_cache& cache,
+ const common_options& o,
+ database& pdb,
+ database& rdb,
transaction& t,
package_name n,
version v,
@@ -376,8 +402,10 @@ namespace bpkg
bool purge,
bool simulate)
{
- return pkg_checkout (o,
- c,
+ return pkg_checkout (cache,
+ o,
+ pdb,
+ rdb,
t,
move (n),
move (v),
@@ -388,16 +416,20 @@ namespace bpkg
}
shared_ptr<selected_package>
- pkg_checkout (const common_options& o,
- const dir_path& c,
+ pkg_checkout (pkg_checkout_cache& cache,
+ const common_options& o,
+ database& pdb,
+ database& rdb,
transaction& t,
package_name n,
version v,
bool replace,
bool simulate)
{
- return pkg_checkout (o,
- c,
+ return pkg_checkout (cache,
+ o,
+ pdb,
+ rdb,
t,
move (n),
move (v),
@@ -415,7 +447,7 @@ namespace bpkg
dir_path c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -433,11 +465,15 @@ namespace bpkg
fail << "package version expected" <<
info << "run 'bpkg help pkg-checkout' for more information";
+ pkg_checkout_cache checkout_cache (o);
+
// Commits the transaction.
//
if (o.output_root_specified ())
- p = pkg_checkout (o,
- c,
+ p = pkg_checkout (checkout_cache,
+ o,
+ db /* pdb */,
+ db /* rdb */,
t,
move (n),
move (v),
@@ -446,17 +482,84 @@ namespace bpkg
o.output_purge (),
false /* simulate */);
else
- p = pkg_checkout (o,
- c,
+ p = pkg_checkout (checkout_cache,
+ o,
+ db /* pdb */,
+ db /* rdb */,
t,
move (n),
move (v),
o.replace (),
false /* simulate */);
+ checkout_cache.clear (); // Detect errors.
+
if (verb && !o.no_result ())
text << "checked out " << *p;
return 0;
}
+
+ // pkg_checkout_cache
+ //
+ pkg_checkout_cache::
+ ~pkg_checkout_cache ()
+ {
+ if (!map_.empty () && !clear (true /* ignore_errors */))
+ {
+ // We assume that the diagnostics has already been issued.
+ //
+ warn << "repository state is now broken" <<
+ info << "run 'bpkg rep-fetch' to repair";
+ }
+ }
+
+ bool pkg_checkout_cache::
+ clear (bool ie)
+ {
+ while (!map_.empty ())
+ {
+ if (!erase (map_.begin (), ie))
+ return false;
+ }
+
+ return true;
+ }
+
+ bool pkg_checkout_cache::
+ erase (state_map::iterator i, bool ie)
+ {
+ state& s (i->second);
+
+ // Bail out if the entry is incomplete.
+ //
+ if (!s.fixedup)
+ {
+ assert (ie); // Only makes sense in the ignore errors mode.
+ return false;
+ }
+
+ // Revert the fix-ups.
+ //
+ // But first make the entry incomplete, so on error we don't try to
+ // restore the partially restored repository later.
+ //
+ bool f (*s.fixedup);
+
+ s.fixedup = nullopt;
+
+ if (f && !fixup (options_, s.rl, s.rmt.path, true /* revert */, ie))
+ return false;
+
+ // Manipulations over the repository are now complete, so we can return it
+ // to the permanent location.
+ //
+ if (!mv (s.rmt.path, i->first, ie))
+ return false;
+
+ s.rmt.cancel ();
+
+ map_.erase (i);
+ return true;
+ }
}
diff --git a/bpkg/pkg-checkout.hxx b/bpkg/pkg-checkout.hxx
index 47b1ad0..b775b07 100644
--- a/bpkg/pkg-checkout.hxx
+++ b/bpkg/pkg-checkout.hxx
@@ -4,6 +4,8 @@
#ifndef BPKG_PKG_CHECKOUT_HXX
#define BPKG_PKG_CHECKOUT_HXX
+#include <map>
+
#include <libbpkg/manifest.hxx> // version
#include <libbpkg/package-name.hxx>
@@ -18,14 +20,76 @@ namespace bpkg
int
pkg_checkout (const pkg_checkout_options&, cli::scanner& args);
+ // Checked out repository fragments cache.
+ //
+ // Needs to be passed to pkg_checkout() calls.
+ //
+ class pkg_checkout_cache
+ {
+ public:
+ // The options reference is assumed to be valid till the end of the cache
+ // object lifetime.
+ //
+ pkg_checkout_cache (const common_options& o): options_ (o) {}
+
+ // Restore the cached repositories in their permanent locations (move back
+ // from the temporary directory, fixup, etc) and erase the entries.
+ //
+ // Note that the destructor will clear the cache but will ignore any
+ // errors. To detect such errors, call clear() explicitly.
+ //
+ bool
+ clear (bool ignore_errors = false);
+
+ // Call clear() in the ignore errors mode and issue the "repository is now
+ // broken" warning on failure.
+ //
+ ~pkg_checkout_cache ();
+
+ // pkg_checkout () implementation details.
+ //
+ public:
+ struct state
+ {
+ auto_rmdir rmt; // The repository temporary directory.
+ repository_location rl; // The repository location.
+
+ // nullopt if the repository fragment checkout failed in the middle and
+ // the repository cannot be restored in its permanent location (we will
+ // call such entry incomplete). True if the repository directory was
+ // fixed up.
+ //
+ optional<bool> fixedup;
+ };
+
+ using state_map = std::map<dir_path, state>;
+
+ state_map map_;
+ const common_options& options_;
+
+ // Restore the repository in its permanent location and erase the cache
+ // entry. On error issue diagnostics and return false in the ignore errors
+ // mode and throw failed otherwise. Note that erasing an incomplete entry
+ // is an error.
+ //
+ bool
+ erase (state_map::iterator, bool ignore_errors = false);
+ };
+
+ // Note that for the following functions both package and repository
+ // information configurations need to be passed.
+ //
+
// Check out the package from a version control-based repository into a
// directory other than the configuration directory and commit the
// transaction. Return the selected package object which may replace the
// existing one.
//
shared_ptr<selected_package>
- pkg_checkout (const common_options&,
- const dir_path& configuration,
+ pkg_checkout (pkg_checkout_cache&,
+ const common_options&,
+ database& pdb,
+ database& rdb,
transaction&,
package_name,
version,
@@ -39,8 +103,10 @@ namespace bpkg
// existing one.
//
shared_ptr<selected_package>
- pkg_checkout (const common_options&,
- const dir_path& configuration,
+ pkg_checkout (pkg_checkout_cache&,
+ const common_options&,
+ database& pdb,
+ database& rdb,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-clean.hxx b/bpkg/pkg-clean.hxx
index 07dced9..e6dfcbe 100644
--- a/bpkg/pkg-clean.hxx
+++ b/bpkg/pkg-clean.hxx
@@ -23,6 +23,7 @@ namespace bpkg
o.all (),
o.all_pattern (),
false /* package_cwd */,
+ true /* allow_host_type */,
args);
}
}
diff --git a/bpkg/pkg-command.cxx b/bpkg/pkg-command.cxx
index 11f10f0..20e5230 100644
--- a/bpkg/pkg-command.cxx
+++ b/bpkg/pkg-command.cxx
@@ -3,7 +3,10 @@
#include <bpkg/pkg-command.hxx>
-#include <libbutl/path-pattern.mxx>
+#include <libbutl/path-pattern.hxx>
+
+#include <libbuild2/file.hxx>
+#include <libbuild2/context.hxx>
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
@@ -18,7 +21,6 @@ namespace bpkg
{
void
pkg_command (const string& cmd,
- const dir_path& c,
const common_options& o,
const string& cmd_v,
const strings& cvars,
@@ -55,6 +57,8 @@ namespace bpkg
}
};
+ unique_ptr<build2::context> ctx; // Create lazily.
+
for (const pkg_command_vars& pv: ps)
{
if (!pv.vars.empty () || pv.cwd)
@@ -75,12 +79,49 @@ namespace bpkg
const shared_ptr<selected_package>& p (pv.pkg);
- assert (p->state == package_state::configured);
- assert (p->out_root); // Should be present since configured.
+ assert (p->state == package_state::configured &&
+ p->substate != package_substate::system);
+ assert (p->out_root &&
+ p->src_root); // Should be present since configured, not system.
- dir_path out_root (p->effective_out_root (c));
+ dir_path out_root (p->effective_out_root (pv.config_orig));
l4 ([&]{trace << p->name << " out_root: " << out_root;});
+ // Figure out if the source directory is forwarded to this out_root. If
+ // it is, then we need to build via src_root. Failed that, backlinks
+ // won't be created.
+ //
+ if (*p->out_root != *p->src_root)
+ {
+ dir_path src_root (p->effective_src_root (pv.config_orig));
+
+ // For us to switch to src_root, it should not only be configured as
+ // forwarded, but also be forwarded to our out_root. So we actually
+ // need to first check if the build/bootstrap/out-root.build (or its
+ // alt naming equivalent) exists and, if so, extract the out_root
+ // value and compare it to ours. This is all done by bootstrap_fwd()
+ // from libbuild2 so seeing that we act as a special build system
+ // driver, we might as well use that. Note that this could potentially
+ // be improved by only creating context if the file exists.
+ //
+ try
+ {
+ if (ctx == nullptr)
+ ctx.reset (new build2::context ());
+
+ optional<bool> altn;
+ if (build2::bootstrap_fwd (*ctx, src_root, altn) == out_root)
+ {
+ out_root = move (src_root);
+ l4 ([&]{trace << p->name << " src_root: " << out_root;});
+ }
+ }
+ catch (const build2::failed&)
+ {
+ throw failed (); // Assume the diagnostics has already been issued.
+ }
+ }
+
if (bspec.back () != '(')
bspec += ' ';
@@ -115,10 +156,19 @@ namespace bpkg
collect_dependencies (const shared_ptr<selected_package>& p,
bool recursive,
bool package_cwd,
- vector<pkg_command_vars>& ps)
+ vector<pkg_command_vars>& ps,
+ bool allow_host_type)
{
for (const auto& pr: p->prerequisites)
{
+ if (!allow_host_type)
+ {
+ database& db (pr.first.database ());
+
+ if (db.type == host_config_type || db.type == build2_config_type)
+ continue;
+ }
+
shared_ptr<selected_package> d (pr.first.load ());
// The selected package can only be configured if all its dependencies
@@ -133,14 +183,24 @@ namespace bpkg
[&d] (const pkg_command_vars& i) {return i.pkg == d;}) ==
ps.end ())
{
+ database& db (pr.first.database ());
+
// Note: no package-specific variables (global ones still apply).
//
- ps.push_back (pkg_command_vars {d,
- strings () /* vars */,
- package_cwd});
+ ps.push_back (
+ pkg_command_vars {
+ db.config_orig,
+ db.main (),
+ d,
+ strings () /* vars */,
+ package_cwd});
if (recursive)
- collect_dependencies (d, recursive, package_cwd, ps);
+ collect_dependencies (d,
+ recursive,
+ package_cwd,
+ ps,
+ allow_host_type);
}
}
}
@@ -154,6 +214,7 @@ namespace bpkg
bool all,
const strings& all_patterns,
bool package_cwd,
+ bool allow_host_type,
cli::group_scanner& args)
{
tracer trace ("pkg_command");
@@ -194,7 +255,7 @@ namespace bpkg
if (args.group ().more ())
fail << "unexpected options group for variable '" << a << "'";
- cvars.push_back (move (a));
+ cvars.push_back (move (trim (a)));
}
else
{
@@ -209,7 +270,7 @@ namespace bpkg
if (a.find ('=') == string::npos)
fail << "unexpected group argument '" << a << "'";
- vars.push_back (move (a));
+ vars.push_back (move (trim (a)));
}
pkg_args.push_back (pkg_arg {move (n), move (vars)});
@@ -248,7 +309,16 @@ namespace bpkg
vector<pkg_command_vars> ps;
{
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
+
+ if (!allow_host_type && (db.type == host_config_type ||
+ db.type == build2_config_type))
+ {
+ fail << "unable to " << cmd << " from " << db.type
+ << " configuration" <<
+ info << "use target configuration instead";
+ }
+
transaction t (db);
// We need to suppress duplicate dependencies for the recursive command
@@ -256,23 +326,33 @@ namespace bpkg
//
session ses;
- auto add = [&ps, recursive, immediate, package_cwd] (
+ auto add =
+ [&db, &ps, allow_host_type, recursive, immediate, package_cwd] (
const shared_ptr<selected_package>& p,
strings vars)
{
- ps.push_back (pkg_command_vars {p, move (vars), package_cwd});
+ ps.push_back (
+ pkg_command_vars {db.config_orig,
+ db.main (),
+ p,
+ move (vars),
+ package_cwd});
// Note that it can only be recursive or immediate but not both.
//
if (recursive || immediate)
- collect_dependencies (p, recursive, package_cwd, ps);
+ collect_dependencies (p,
+ recursive,
+ package_cwd,
+ ps,
+ allow_host_type);
};
if (all || !all_patterns.empty ())
{
using query = query<selected_package>;
- query q (query::hold_package &&
+ query q (query::hold_package &&
query::state == "configured" &&
query::substate != "system");
@@ -310,13 +390,13 @@ namespace bpkg
<< "configuration " << c;
if (p->state != package_state::configured)
- fail << "package " << a.name << " is " << p->state <<
+ fail << "package " << a.name << db << " is " << p->state <<
info << "expected it to be configured";
if (p->substate == package_substate::system)
- fail << "cannot " << cmd << " system package " << a.name;
+ fail << "cannot " << cmd << " system package " << a.name << db;
- l4 ([&]{trace << *p;});
+ l4 ([&]{trace << *p << db;});
add (p, move (a.vars));
}
@@ -325,14 +405,27 @@ namespace bpkg
t.commit ();
}
- pkg_command (cmd, c, o, cmd_v, cvars, ps);
+ pkg_command (cmd, o, cmd_v, cvars, ps);
if (verb && !o.no_result ())
{
for (const pkg_command_vars& pv: ps)
- text << cmd << (cmd.back () != 'e' ? "ed " : "d ") << *pv.pkg;
+ text << cmd << (cmd.back () != 'e' ? "ed " : "d ") << pv.string ();
}
return 0;
}
+
+ // pkg_command_vars
+ //
+ string pkg_command_vars::
+ string () const
+ {
+ std::string r (pkg->string ());
+
+ if (!config_main)
+ r += " [" + config_orig.representation () + ']';
+
+ return r;
+ }
}
diff --git a/bpkg/pkg-command.hxx b/bpkg/pkg-command.hxx
index 40a55f2..2de9b73 100644
--- a/bpkg/pkg-command.hxx
+++ b/bpkg/pkg-command.hxx
@@ -20,6 +20,12 @@ namespace bpkg
// The command can also be performed recursively for all or immediate
// dependencies of the specified or all the held packages.
//
+ // If allow_host_type is false, then fail if the current configuration is of
+ // the host or build2 type. Also skip the build-time dependencies in the
+ // recursive mode in this case.
+ //
+ // Note: loads selected packages.
+ //
int
pkg_command (const string& cmd, // Without the 'pkg-' prefix.
const configuration_options&,
@@ -29,19 +35,39 @@ namespace bpkg
bool all,
const strings& all_patterns,
bool package_cwd,
+ bool allow_host_type,
cli::group_scanner& args);
struct pkg_command_vars
{
+ // Configuration information.
+ //
+ // Used to derive the package out_root directory, issue diagnostics, etc.
+ //
+ // Note that we cannot store the database reference here since it can be
+ // closed by the time this information is used. Instead, we save the
+ // required information.
+ //
+ dir_path config_orig; // Database's config_orig.
+ bool config_main; // True if database is main.
+
shared_ptr<selected_package> pkg;
- strings vars; // Package-specific command line vars.
+ strings vars; // Package-specific command line vars.
bool cwd; // Change the working directory to the package directory.
+
+ // Return the selected package name/version followed by the configuration
+ // directory, unless this is the current configuration. For example:
+ //
+ // libfoo/1.1.0
+ // libfoo/1.1.0 [cfg/]
+ //
+ std::string
+ string () const;
};
void
pkg_command (const string& cmd,
- const dir_path& configuration,
const common_options&,
const string& cmd_variant,
const strings& common_vars,
diff --git a/bpkg/pkg-configure.cxx b/bpkg/pkg-configure.cxx
index cd55575..eb5b85b 100644
--- a/bpkg/pkg-configure.cxx
+++ b/bpkg/pkg-configure.cxx
@@ -3,11 +3,23 @@
#include <bpkg/pkg-configure.hxx>
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/file.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/operation.hxx>
+#include <libbuild2/config/operation.hxx>
+
+#include <bpkg/bpkg.hxx> // build2_init(), etc
+
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/satisfaction.hxx>
+#include <bpkg/package-query.hxx>
#include <bpkg/manifest-utility.hxx>
#include <bpkg/pkg-verify.hxx>
@@ -18,100 +30,555 @@ using namespace butl;
namespace bpkg
{
- package_prerequisites
+ static optional<version_constraint> absent_constraint;
+
+ configure_prerequisites_result
pkg_configure_prerequisites (const common_options& o,
- transaction& t,
+ database& db,
+ transaction&,
const dependencies& deps,
- const package_name& package)
+ const vector<size_t>* alts,
+ package_skeleton&& ps,
+ const vector<package_name>* prev_prereqs,
+ bool simulate,
+ const function<find_database_function>& fdb,
+ const function<find_package_state_function>& fps,
+ const vector<package_key>* unconstrain_deps)
{
- package_prerequisites r;
+ tracer trace ("pkg_configure_prerequisites");
+
+ // Unconstraining dependencies are only allowed in the simulation mode.
+ //
+ assert (unconstrain_deps == nullptr || simulate);
+
+ // No use case for both being specified.
+ //
+ assert (alts == nullptr || prev_prereqs == nullptr);
+
+ tracer_guard tg (db, trace);
+
+ package_prerequisites prereqs;
+ vector<size_t> dep_alts;
+ strings vars;
+
+ // Notes on the buildfile clauses evaluation:
+ //
+ // - In the manual configuration mode (alts == NULL, prev_prereqs == NULL)
+ // we always evaluate the enable and reflect clauses. We, however, fail
+ // if any of the prefer or require clauses are specified in any of the
+ // enabled dependency alternatives, assuming that this package didn't
+ // negotiate its preferences/requirements for the dependency
+ // configurations.
+ //
+ // Note that evaluating the require and prefer clauses in this case is
+ // meaningless since we don't reconfigure the dependencies nor negotiate
+ // configurations with other dependents. What we should probably do is
+ // load configurations of the dependencies and use them while evaluating
+ // the dependent's enable and reflect clauses as we go along. Probably
+ // we should still evaluate the accept clauses to make sure that the
+ // dependency is configured acceptably for the dependent.
+ //
+ // - In the pre-selected alternatives mode (alts != NULL, prev_prereqs ==
+ // NULL) we don't evaluate the enable, prefer, and require clauses since
+ // they have already been evaluated as a part of the dependency
+ // alternatives selection and the dependency configurations negotiation.
+ // We, however always evaluate the reflect clauses.
+ //
+ // - In the reconfiguration mode (prev_prereqs != NULL, alts == NULL) we
+ // don't evaluate the prefer and require clauses, assuming that was done
+ // on some previous pkg-build run when this package and its dependencies
+ // have been configured. But because of this we may not evaluate the
+ // enable and reflect clauses which refer to dependency configuration
+ // variables. If such clauses are present, then this is considered an
+ // implementation error since such packages should be handled in the
+ // above pre-selected alternatives mode.
+ //
+ bool manual (alts == nullptr && prev_prereqs == nullptr);
+
+ // In the reconfiguration mode keep track of configuration variable
+ // prefixes (in the 'config.<dependency>.' form) for dependencies in the
+ // selected alternatives with the prefer or require clauses specified and
+ // fail if any enable or reflect clause refers to them.
+ //
+ // Note that the enable and reflect clauses may only refer to dependency
+ // configuration variables of already selected alternatives with the
+ // prefer or require clauses specified.
+ //
+ vector<string> banned_var_prefixes;
+
+ auto verify_banned_vars = [&ps,
+ &banned_var_prefixes] (const string& clause,
+ const char* what)
+ {
+ for (const string& p: banned_var_prefixes)
+ {
+ if (clause.find (p) != string::npos)
+ {
+ fail << "unable to reconfigure dependent " << ps.package.name
+ << " with " << what << " clause that refers to dependency "
+ << "configuration variables" <<
+ info << "please report in https://github.com/build2/build2/issues/302";
+ }
+ }
+ };
+
+ // Alternatives argument must be parallel to the dependencies argument if
+ // specified.
+ //
+ assert (alts == nullptr || alts->size () == deps.size ());
- database& db (t.database ());
+ dep_alts.reserve (deps.size ());
- for (const dependency_alternatives_ex& da: deps)
+ for (size_t di (0); di != deps.size (); ++di)
{
- assert (!da.conditional); //@@ TODO
+ // Skip the toolchain build-time dependencies and dependencies without
+ // enabled alternatives.
+ //
+ const dependency_alternatives_ex& das (deps[di]);
+
+ if (das.empty ())
+ {
+ dep_alts.push_back (0);
+ continue;
+ }
+
+ small_vector<pair<reference_wrapper<const dependency_alternative>,
+ size_t>,
+ 2> edas;
- bool satisfied (false);
- for (const dependency& d: da)
+ if (alts == nullptr)
{
- const package_name& n (d.name);
+ if (toolchain_buildtime_dependency (o, das, &ps.package.name))
+ {
+ dep_alts.push_back (0);
+ continue;
+ }
- if (da.buildtime)
+ for (size_t i (0); i != das.size (); ++i)
{
- // Handle special names.
+ const dependency_alternative& da (das[i]);
+
+ // Evaluate the dependency alternative enable clause, if present,
+ // unless it refers to any banned variables in which case we fail.
//
- if (n == "build2")
+ if (da.enable)
{
- if (d.constraint)
- satisfy_build2 (o, package, d);
+ if (!banned_var_prefixes.empty ())
+ verify_banned_vars (*da.enable, "enable");
- satisfied = true;
- break;
+ if (!ps.evaluate_enable (*da.enable, make_pair (di, i)))
+ continue;
}
- else if (n == "bpkg")
- {
- if (d.constraint)
- satisfy_bpkg (o, package, d);
- satisfied = true;
- break;
- }
- // else
- //
- // @@ TODO: in the future we would need to at least make sure the
- // build and target machines are the same. See also pkg-build.
+ if (manual && (da.prefer || da.require))
+ fail << "manual configuration of dependents with prefer or "
+ << "require clauses is not yet supported";
+
+ edas.push_back (make_pair (ref (da), i));
}
- if (shared_ptr<selected_package> dp = db.find<selected_package> (n))
+ if (edas.empty ())
{
- if (dp->state != package_state::configured)
- continue;
+ dep_alts.push_back (0);
+ continue;
+ }
+ }
+ else
+ {
+ // Must only contain the selected alternative.
+ //
+ assert (das.size () == 1);
- if (!satisfies (dp->version, d.constraint))
- continue;
+ edas.push_back (make_pair (ref (das.front ()), (*alts)[di]));
+ }
+
+ // Pick the first alternative with dependencies that can all be resolved
+ // to the configured packages, satisfying the respective constraints.
+ //
+ // If the list of the former prerequisites is specified, then first try
+ // to select an alternative in the "recreate dependency decisions" mode,
+ // filtering out alternatives where dependencies do not all belong to
+ // this list. If we end up with no alternative selected, then retry in
+ // the "make dependency decisions" mode and select the alternative
+ // regardless of the former prerequisites.
+ //
+ assert (!edas.empty ());
- auto p (r.emplace (dp, d.constraint));
+ for (const vector<package_name>* pps (prev_prereqs);;)
+ {
+ const pair<reference_wrapper<const dependency_alternative>,
+ size_t>* selected_alt (nullptr);
- // Currently we can only capture a single constraint, so if we
- // already have a dependency on this package and one constraint is
- // not a subset of the other, complain.
+ for (const auto& eda: edas)
+ {
+ const dependency_alternative& da (eda.first);
+
+ // Cache the selected packages which correspond to the alternative
+ // dependencies, pairing them with the respective constraints. If
+ // the alternative turns out to be fully resolvable, we will add the
+ // cached packages into the dependent's prerequisites map.
//
- if (!p.second)
- {
- auto& c (p.first->second);
+ small_vector<
+ pair<lazy_shared_ptr<selected_package>, prerequisite_info>,
+ 1> prerequisites;
- bool s1 (satisfies (c, d.constraint));
- bool s2 (satisfies (d.constraint, c));
+ dependency_alternative::const_iterator b (da.begin ());
+ dependency_alternative::const_iterator i (b);
+ dependency_alternative::const_iterator e (da.end ());
- if (!s1 && !s2)
- fail << "multiple dependencies on package " << n <<
- info << n << " " << *c <<
- info << n << " " << *d.constraint;
+ assert (b != e);
+
+ for (; i != e; ++i)
+ {
+ const dependency& d (*i);
+ const package_name& n (d.name);
+
+ database* ddb (fdb ? fdb (db, n, das.buildtime) : nullptr);
+
+ pair<shared_ptr<selected_package>, database*> spd (
+ ddb != nullptr
+ ? make_pair (ddb->find<selected_package> (n), ddb)
+ : find_dependency (db, n, das.buildtime));
+
+ const shared_ptr<selected_package>& dp (spd.first);
+
+ if (dp == nullptr)
+ break;
+
+ database& pdb (*spd.second);
+
+ optional<pair<package_state, package_substate>> dps;
+ if (fps != nullptr)
+ dps = fps (dp);
+
+ const optional<version_constraint>* dc (&d.constraint);
+
+ // Unconstrain this dependency, if requested.
+ //
+ if (unconstrain_deps != nullptr)
+ {
+ const vector<package_key>& uds (*unconstrain_deps);
+ if (find (uds.begin (), uds.end (), package_key (pdb, n)) !=
+ uds.end ())
+ {
+ dc = &absent_constraint;
+ }
+ }
+
+ if ((dps ? dps->first : dp->state) != package_state::configured ||
+ !satisfies (dp->version, *dc) ||
+ (pps != nullptr &&
+ find (pps->begin (), pps->end (), dp->name) == pps->end ()))
+ break;
+
+ // See the package_prerequisites definition for details on
+ // creating the map keys with the database passed.
+ //
+ prerequisites.emplace_back (
+ lazy_shared_ptr<selected_package> (pdb, dp),
+ prerequisite_info {*dc});
+ }
+
+ // Try the next alternative if there are unresolved dependencies for
+ // this alternative.
+ //
+ if (i != e)
+ continue;
- if (s2 && !s1)
- c = d.constraint;
+ // Now add the selected packages resolved for the alternative into
+ // the dependent's prerequisites map and skip the remaining
+ // alternatives.
+ //
+ for (auto& pr: prerequisites)
+ {
+ const package_name& pn (pr.first.object_id ());
+ const prerequisite_info& pi (pr.second);
+
+ auto p (prereqs.emplace (pr.first, pi));
+
+ // Currently we can only capture a single constraint, so if we
+ // already have a dependency on this package and one constraint is
+ // not a subset of the other, complain.
+ //
+ if (!p.second)
+ {
+ auto& c1 (p.first->second.constraint);
+ auto& c2 (pi.constraint);
+
+ bool s1 (satisfies (c1, c2));
+ bool s2 (satisfies (c2, c1));
+
+ if (!s1 && !s2)
+ fail << "multiple dependencies on package " << pn <<
+ info << pn << " " << *c1 <<
+ info << pn << " " << *c2;
+
+ if (s2 && !s1)
+ c1 = c2;
+ }
+
+ // If the prerequisite is configured in the linked configuration,
+ // then add the respective config.import.* variable.
+ //
+ if (!simulate)
+ {
+ database& pdb (pr.first.database ());
+
+ if (pdb != db)
+ {
+ shared_ptr<selected_package> sp (pr.first.load ());
+
+ optional<pair<package_state, package_substate>> ps;
+ if (fps != nullptr)
+ ps = fps (sp);
+
+ if (ps
+ ? ps->second != package_substate::system
+ : !sp->system ())
+ {
+ // @@ Note that this doesn't work for build2 modules that
+ // require bootstrap. For their dependents we need to
+ // specify the import variable as a global override,
+ // whenever required (configure, update, etc).
+ //
+ // This, in particular, means that if we build a package
+ // that doesn't have direct build2 module dependencies
+ // but some of its (potentially indirect) dependencies
+ // do, then we still need to specify the !config.import.*
+ // global overrides for all of the involved build2
+ // modules. Implementation of that feels too hairy at the
+ // moment, so let's handle all the build2 modules
+ // uniformly for now.
+ //
+ // Also note that such modules are marked with `requires:
+ // bootstrap` in their manifest.
+ //
+ // Note that we currently don't support global overrides
+ // in the shared build2 context (but could probably do,
+ // if necessary).
+ //
+
+ dir_path od;
+ if (ps)
+ {
+ // There is no out_root for a would-be configured package.
+ // So we calculate it like in pkg_configure() below (yeah,
+ // it's an ugly hack).
+ //
+ od = sp->external ()
+ ? pdb.config / dir_path (sp->name.string ())
+ : pdb.config / dir_path (sp->name.string () + '-' +
+ sp->version.string ());
+ }
+ else
+ od = sp->effective_out_root (pdb.config);
+
+ // We tried to use global overrides to recreate the original
+ // behavior of not warning about unused config.import.*
+ // variables (achieved via the config.config.persist value in
+ // amalgamation). Even though it's probably misguided (we
+ // don't actually save the unused values anywhere, just
+ // don't warn about them).
+ //
+ // Can we somehow cause a clash, say if the same package
+ // comes from different configurations? Yeah, we probably
+ // can. So could add it as undermined (?), detect a clash,
+ // and "fallforward" to the correct behavior.
+ //
+ // But we can clash with an absent value -- that is, we
+ // force importing from a wrong configuration where without
+ // any import things would have been found in the same
+ // amalgamation. Maybe we could detect that (no import
+ // for the same package -- but it could be for a package
+ // we are not configuring).
+ //
+ vars.push_back ("config.import." + sp->name.variable () +
+ "='" + od.representation () + '\'');
+ }
+ }
+ }
}
- satisfied = true;
+ selected_alt = &eda;
break;
}
+
+ // Fail if no dependency alternative is selected, unless we are in the
+ // "recreate dependency decisions" mode. In the latter case fall back
+ // to the "make dependency decisions" mode and retry.
+ //
+ if (selected_alt == nullptr)
+ {
+ if (pps != nullptr)
+ {
+ pps = nullptr;
+ continue;
+ }
+
+ fail << "unable to satisfy dependency on " << das;
+ }
+
+ const dependency_alternative& da (selected_alt->first);
+
+ // In the reconfiguration mode ban the usage of the selected
+ // alternative dependency configuration variables in the subsequent
+ // enable and reflect clauses, unless we are also unconstraining
+ // dependencies (which indicates it's a relaxed mode that precedes
+ // a drop or failure with better diagnostics).
+ //
+ if (alts == nullptr && !manual &&
+ unconstrain_deps == nullptr &&
+ (da.prefer || da.require))
+ {
+ for (const dependency& d: da)
+ banned_var_prefixes.push_back (
+ "config." + d.name.variable () + '.');
+ }
+
+ // Evaluate the selected dependency alternative reflect clause, if
+ // present, unless it refers to any banned variables in which case we
+ // fail.
+ //
+ if (da.reflect)
+ {
+ if (!banned_var_prefixes.empty ())
+ verify_banned_vars (*da.reflect, "reflect");
+
+ ps.evaluate_reflect (*da.reflect,
+ make_pair (di, selected_alt->second));
+ }
+
+ dep_alts.push_back (selected_alt->second + 1);
+
+ // The dependency alternative is selected and its dependencies are
+ // resolved to the selected packages. So proceed to the next depends
+ // value.
+ //
+ break;
}
+ }
+
+ // Make sure we didn't miss any selected dependency alternative.
+ //
+ assert (dep_alts.size () == deps.size ());
+
+ // Add the rest of the configuration variables (user overrides, reflects,
+ // etc) as well as their sources.
+ //
+ vector<config_variable> srcs;
+ string checksum;
+
+ if (!simulate)
+ {
+ checksum = ps.config_checksum ();
+
+ pair<strings, vector<config_variable>> rvs (move (ps).collect_config ());
- if (!satisfied)
- fail << "no configured package satisfies dependency on " << da;
+ strings& vs (rvs.first);
+ srcs = move (rvs.second);
+
+ if (!vs.empty ())
+ {
+ if (vars.empty ())
+ vars = move (vs);
+ else
+ {
+ vars.reserve (vars.size () + vs.size ());
+
+ for (string& v: vs)
+ vars.push_back (move (v));
+ }
+ }
}
- return r;
+ return configure_prerequisites_result {move (prereqs),
+ move (dep_alts),
+ move (vars),
+ move (srcs),
+ move (checksum)};
+ }
+
+
+ unique_ptr<build2::context>
+ pkg_configure_context (
+ const common_options& o,
+ strings&& cmd_vars,
+ const function<build2::context::var_override_function>& var_ovr_func)
+ {
+ using namespace build2;
+
+ // Initialize the build system.
+ //
+ // Note that this takes into account --build-option and default options
+ // files (which may have global overrides and which end up in
+ // build2_cmd_vars).
+ //
+ if (!build2_sched.started ())
+ build2_init (o);
+
+ // Re-tune the scheduler for parallel execution (see build2_init()
+ // for details).
+ //
+ if (build2_sched.tuned ())
+ build2_sched.tune (0);
+
+ auto merge_cmd_vars = [&cmd_vars] () -> const strings&
+ {
+ if (cmd_vars.empty ())
+ return build2_cmd_vars;
+
+ if (!build2_cmd_vars.empty ())
+ cmd_vars.insert (cmd_vars.begin (),
+ build2_cmd_vars.begin (), build2_cmd_vars.end ());
+
+ return cmd_vars;
+ };
+
+ // Shouldn't we share the module context with package skeleton
+ // contexts? Maybe we don't have to since we don't build modules in
+ // them concurrently (in a sense, we didn't share it when we were
+ // invoking the build system driver).
+ //
+ unique_ptr<context> ctx (
+ new context (build2_sched,
+ build2_mutexes,
+ build2_fcache,
+ nullopt /* match_only */,
+ false /* no_external_modules */,
+ false /* dry_run */,
+ false /* no_diag_buffer */,
+ false /* keep_going */,
+ merge_cmd_vars (),
+ context::reserves {
+ 30000 /* targets */,
+ 1100 /* variables */},
+ nullptr /* module_context */,
+ nullptr /* inherited_modules_lock */,
+ var_ovr_func));
+
+ // Set the current meta-operation once per context so that we don't reset
+ // ctx->current_on. Note that this function also sets ctx->current_mname
+ // and var_build_meta_operation on global scope.
+ //
+ ctx->current_meta_operation (config::mo_configure);
+ ctx->current_oname = string (); // default
+
+ return ctx;
}
void
- pkg_configure (const dir_path& c,
- const common_options& o,
+ pkg_configure (const common_options& o,
+ database& db,
transaction& t,
const shared_ptr<selected_package>& p,
- const dependencies& deps,
- const strings& vars,
+ configure_prerequisites_result&& cpr,
+#ifndef BPKG_OUTPROC_CONFIGURE
+ const unique_ptr<build2::context>& pctx,
+ const build2::variable_overrides& ovrs,
+#else
+ const unique_ptr<build2::context>&,
+ const build2::variable_overrides&, // Still in cpr.config_variables.
+#endif
bool simulate)
{
tracer trace ("pkg_configure");
@@ -119,30 +586,53 @@ namespace bpkg
assert (p->state == package_state::unpacked);
assert (p->src_root); // Must be set since unpacked.
- database& db (t.database ());
tracer_guard tg (db, trace);
+#ifndef BPKG_OUTPROC_CONFIGURE
+ const dir_path& c (db.config); // Absolute.
+#else
+ const dir_path& c (db.config_orig); // Relative.
+#endif
+
dir_path src_root (p->effective_src_root (c));
// Calculate package's out_root.
//
+ // Note: see a version of this in pkg_configure_prerequisites().
+ //
dir_path out_root (
p->external ()
? c / dir_path (p->name.string ())
- : c / dir_path (p->name.string () + "-" + p->version.string ()));
+ : c / dir_path (p->name.string () + '-' + p->version.string ()));
l4 ([&]{trace << "src_root: " << src_root << ", "
<< "out_root: " << out_root;});
- // Verify all our prerequisites are configured and populate the
- // prerequisites list.
- //
- assert (p->prerequisites.empty ());
+ assert (p->prerequisites.empty () && p->dependency_alternatives.empty ());
- p->prerequisites = pkg_configure_prerequisites (o, t, deps, p->name);
+ p->prerequisites = move (cpr.prerequisites);
+ p->dependency_alternatives = move (cpr.dependency_alternatives);
+ // Mark the section as loaded, so dependency alternatives are updated.
+ //
+ p->dependency_alternatives_section.load ();
+
+ // Configure.
+ //
if (!simulate)
{
+ // Original implementation that runs the standard build system driver.
+ //
+ // Note that the semantics doesn't match 100%. In particular, in the
+ // in-process implementation we enter overrides with global visibility
+ // in each project instead of the amalgamation (which is probably more
+ // accurate, since we don't re-configure the amalgamation nor some
+ // dependencies which could be affected by such overrides). In a sense,
+ // we enter them as if they were specified with the special .../ scope
+ // (but not with the % project visibility -- they must still be visible
+ // in subprojects).
+ //
+#ifdef BPKG_OUTPROC_CONFIGURE
// Form the buildspec.
//
string bspec;
@@ -158,20 +648,211 @@ namespace bpkg
l4 ([&]{trace << "buildspec: " << bspec;});
- // Configure.
- //
try
{
- run_b (o, verb_b::quiet, vars, bspec);
+ run_b (o, verb_b::quiet, cpr.config_variables, bspec);
}
catch (const failed&)
{
+ // See below for comments.
+ //
+ p->out_root = out_root.leaf ();
+ p->state = package_state::broken;
+ pkg_disfigure (o, db, t, p, true, true, false);
+ throw;
+ }
+#else
+ // Print the out-process command line in the verbose mode.
+ //
+ if (verb >= 2)
+ {
+ string bspec;
+
+ // Use path representation to get canonical trailing slash.
+ //
+ if (src_root == out_root)
+ bspec = "configure('" + out_root.representation () + "')";
+ else
+ bspec = "configure('" +
+ src_root.representation () + "'@'" +
+ out_root.representation () + "')";
+
+ print_b (o, verb_b::quiet, cpr.config_variables, bspec);
+ }
+
+ try
+ {
+ // Note: no bpkg::failed should be thrown from this block.
+ //
+ using namespace build2;
+ using build2::fail;
+ using build2::info;
+ using build2::endf;
+ using build2::location;
+
+ // The build2_init() function initializes the build system verbosity
+ // as if running with verb_b::normal while we need verb_b::quiet. So
+ // we temporarily adjust the build2 verbosity (see map_verb_b() for
+ // details).
+ //
+ auto verbg (make_guard ([ov = build2::verb] () {build2::verb = ov;}));
+ if (bpkg::verb == 1)
+ build2::verb = 0;
+
+ context& ctx (*pctx);
+
+ // Bootstrap and load the project.
+ //
+ // Note: in many ways similar to package_skeleton code.
+ //
+ scope& rs (*create_root (ctx, out_root, src_root)->second.front ());
+
+ // If we are configuring in the dependency order (as we should), then
+ // it feels like the only situation where we can end up with an
+ // already bootstrapped project is an unspecified dependency. Note
+ // that this is a hard fail since it would have been loaded without
+ // the proper configuration.
+ //
+ if (bootstrapped (rs))
+ {
+ fail << p->name << db << " loaded ahead of its dependents" <<
+ info << "likely unspecified dependency on package " << p->name;
+ }
+
+ optional<bool> altn;
+ value& v (bootstrap_out (rs, altn));
+
+ if (!v)
+ v = src_root;
+ else
+ {
+ dir_path& p (cast<dir_path> (v));
+
+ if (src_root != p)
+ {
+ // @@ Fuzzy if need this or can do as package skeleton (seeing
+ // that we know we are re-configuring).
+ //
+ ctx.new_src_root = src_root;
+ ctx.old_src_root = move (p);
+ p = src_root;
+ }
+ }
+
+ setup_root (rs, false /* forwarded */);
+
+ // Note: we already know our amalgamation.
+ //
+ bootstrap_pre (rs, altn);
+ bootstrap_src (rs, altn,
+ c.relative (out_root) /* amalgamation */,
+ true /* subprojects */);
+
+ create_bootstrap_outer (rs, true /* subprojects */);
+ bootstrap_post (rs);
+
+ values mparams;
+ const meta_operation_info& mif (config::mo_configure);
+ const operation_info& oif (op_default);
+
+ // Skip configure_pre() and configure_operation_pre() calls since we
+ // don't pass any parameteres and pass default operation. We also know
+ // that op_default has no pre/post operations, naturally.
+
+ // Find the root buildfile. Note that the implied buildfile logic does
+ // not apply (our target is the project root directory).
+ //
+ optional<path> bf (find_buildfile (src_root, src_root, altn));
+
+ if (!bf)
+ fail << "no buildfile in " << src_root;
+
+ // Enter project-wide overrides.
+ //
+ // Note that the use of the root scope as amalgamation makes sure
+ // scenarious like below work correctly (see above for background).
+ //
+ // bpkg create -d cfg cc config.cc.coptions=-Wall
+ // bpkg build { config.cc.coptions+=-g }+ libfoo
+ // { config.cc.coptions+=-O }+ libbar
+ //
+ ctx.enter_project_overrides (rs, out_root, ovrs, &rs);
+
+ // The goal here is to be more or less semantically equivalent to
+ // configuring several projects at once. Except that here we have
+ // interleaving load/match instead of first all load then all
+ // match. But presumably this shouldn't be a problem (we can already
+ // have match interrupted by load and the "island append" requirement
+ // should hold here as well).
+ //
+ // Note that either way we will be potentially re-matching the same
+ // dependency targets multiple times (see build2::configure_execute()
+ // for details).
+ //
+ const path_name bsn ("<buildspec>");
+ const location loc (bsn, 0, 0);
+
+ // out_root/dir{./}
+ //
+ target_key tk {
+ &dir::static_type,
+ &out_root,
+ &empty_dir_path,
+ &empty_string,
+ nullopt};
+
+ action_targets tgs;
+ mif.load (mparams, rs, *bf, out_root, src_root, loc);
+ mif.search (mparams, rs, rs, *bf, tk, loc, tgs);
+
+ ctx.current_operation (oif, nullptr);
+ action a (ctx.current_action ());
+
+ mif.match (mparams, a, tgs, 2 /* diag */, true /* progress */);
+ mif.execute (mparams, a, tgs, 2 /* diag */, true /* progress */);
+
+ // Note: no operation_post/meta_operation_post for configure.
+
+ // Here is a tricky part: if this is a normal package, then it will be
+ // discovered as a subproject of the bpkg configuration when we load
+ // it for the first time (because they are all unpacked). However, if
+ // this is a package with src_root!=out_root (such as an external
+ // package or a package with a custom checkout_root) then there could
+ // be no out_root directory for it in the bpkg configuration yet. As a
+ // result, we need to manually add it as a newly discovered
+ // subproject.
+ //
+ if (!rs.out_eq_src ())
+ {
+ scope* as (rs.parent_scope ()->root_scope ());
+ assert (as != nullptr); // No bpkg configuration?
+
+ // Kept NULL if there are no subprojects, so we may need to
+ // initialize it (see build2::bootstrap_src() for details).
+ //
+ subprojects* sp (*as->root_extra->subprojects);
+ if (sp == nullptr)
+ {
+ value& v (as->vars.assign (*ctx.var_subprojects));
+ v = subprojects {};
+ sp = *(as->root_extra->subprojects = &cast<subprojects> (v));
+ }
+
+ const project_name& n (**rs.root_extra->project);
+
+ if (sp->find (n) == sp->end ())
+ sp->emplace (n, out_root.leaf ());
+ }
+ }
+ catch (const build2::failed&)
+ {
+ // Assume the diagnostics has already been issued.
+
// If we failed to configure the package, make sure we revert
// it back to the unpacked state by running disfigure (it is
// valid to run disfigure on an un-configured build). And if
// disfigure fails as well, then the package will be set into
// the broken state.
- //
// Indicate to pkg_disfigure() we are partially configured.
//
@@ -180,9 +861,19 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
- throw;
+ pkg_disfigure (o, db, t,
+ p,
+ true /* clean */,
+ true /* disfigure */,
+ false /* simulate */);
+
+
+ throw bpkg::failed ();
}
+#endif
+
+ p->config_variables = move (cpr.config_sources);
+ p->config_checksum = move (cpr.config_checksum);
}
p->out_root = out_root.leaf ();
@@ -192,14 +883,81 @@ namespace bpkg
t.commit ();
}
+ void
+ pkg_configure (const common_options& o,
+ database& db,
+ transaction& t,
+ const shared_ptr<selected_package>& p,
+ const dependencies& deps,
+ const vector<size_t>* alts,
+ package_skeleton&& ps,
+ const vector<package_name>* pps,
+ bool disfigured,
+ bool simulate,
+ const function<find_database_function>& fdb)
+ {
+ configure_prerequisites_result cpr (
+ pkg_configure_prerequisites (o,
+ db,
+ t,
+ deps,
+ alts,
+ move (ps),
+ pps,
+ simulate,
+ fdb,
+ nullptr));
+
+ if (!simulate)
+ {
+ // Unless this package has been completely disfigured, disfigure all the
+ // package configuration variables to reset all the old values to
+ // defaults (all the new user/dependent/reflec values, including old
+ // user, are returned by collect_config() and specified as overrides).
+ // Note that this semantics must be consistent with how we load things
+ // in the package skeleton during configuration negotiation.
+ //
+ // Note also that this means we don't really use the dependent and
+ // reflect sources that we save in the database. But let's keep them
+ // for the completeness of information (maybe could be useful during
+ // configuration reset or some such).
+ //
+ if (!disfigured)
+ {
+ // Note: must be quoted to preserve the pattern.
+ //
+ cpr.config_variables.push_back (
+ "config.config.disfigure='config." + p->name.variable () + "**'");
+ }
+ }
+
+ unique_ptr<build2::context> ctx;
+
+#ifndef BPKG_OUTPROC_CONFIGURE
+ if (!simulate)
+ ctx = pkg_configure_context (o, move (cpr.config_variables));
+#endif
+
+ pkg_configure (o,
+ db,
+ t,
+ p,
+ move (cpr),
+ ctx,
+ (ctx != nullptr
+ ? ctx->var_overrides
+ : build2::variable_overrides {}),
+ simulate);
+ }
+
shared_ptr<selected_package>
pkg_configure_system (const package_name& n,
const version& v,
+ database& db,
transaction& t)
{
tracer trace ("pkg_configure_system");
- database& db (t.database ());
tracer_guard tg (db, trace);
shared_ptr<selected_package> p (
@@ -216,6 +974,7 @@ namespace bpkg
nullopt, // No source directory.
false,
nullopt, // No manifest checksum.
+ nullopt, // No buildfiles checksum.
nullopt, // No output directory.
{}}); // No prerequisites.
@@ -252,7 +1011,7 @@ namespace bpkg
}
if (!sep && a.find ('=') != string::npos)
- vars.push_back (move (a));
+ vars.push_back (move (trim (a)));
else if (n.empty ())
n = move (a);
else
@@ -269,7 +1028,7 @@ namespace bpkg
if (ps == package_scheme::sys && !vars.empty ())
fail << "configuration variables specified for a system package";
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -297,7 +1056,7 @@ namespace bpkg
if (filter_one (root, db.query<available_package> (q)).first == nullptr)
fail << "unknown package " << n;
- p = pkg_configure_system (n, v.empty () ? wildcard_version : v, t);
+ p = pkg_configure_system (n, v.empty () ? wildcard_version : v, db, t);
}
else
{
@@ -315,16 +1074,47 @@ namespace bpkg
l4 ([&]{trace << *p;});
- package_manifest m (pkg_verify (p->effective_src_root (c),
- true /* ignore_unknown */,
- [&p] (version& v) {v = p->version;}));
+ // Let's not bother trying to find an available package for this
+ // selected package, which may potentially not be present in this
+ // configuration (but instead be present in the configuration we are
+ // linked to, etc) and create a transient available package outright.
+ //
+ shared_ptr<available_package> ap (make_available (o, db, p));
+
+ optional<dir_path> src_root (p->external () ? p->src_root : nullopt);
+
+ optional<dir_path> out_root (src_root
+ ? dir_path (db.config) /= p->name.string ()
+ : optional<dir_path> ());
- pkg_configure (c,
- o,
+ // Note on the disfigure logic: while we don't know whether the package
+ // has been disfigured with --keep-config or not, it has already been
+ // done physically and if without --keep-config, then config.build has
+ // been removed and config_variables cleaned. As a result, we can just
+ // proceed as disfigure=false and disfigure=true will be taken care
+ // automatically (because then things have been removed/cleaned).
+ //
+ pkg_configure (o,
+ db,
t,
p,
- convert (move (m.dependencies)),
- vars,
+ ap->dependencies,
+ nullptr /* alternatives */,
+ package_skeleton (o,
+ package_key (db, ap->id.name),
+ false /* system */,
+ ap,
+ move (vars),
+ false /* disfigure */,
+ &p->config_variables,
+ move (src_root),
+ move (out_root),
+ nullopt /* old_src_root */,
+ nullopt /* old_out_root */,
+ package_skeleton::load_config_user |
+ package_skeleton::load_config_dependent),
+ nullptr /* prerequisites */,
+ false /* disfigured */,
false /* simulate */);
}
diff --git a/bpkg/pkg-configure.hxx b/bpkg/pkg-configure.hxx
index b708df5..cd74786 100644
--- a/bpkg/pkg-configure.hxx
+++ b/bpkg/pkg-configure.hxx
@@ -7,12 +7,16 @@
#include <libbpkg/manifest.hxx> // version
#include <libbpkg/package-name.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/variable.hxx> // variable_overrides
+
#include <bpkg/types.hxx>
#include <bpkg/forward.hxx> // transaction, selected_package
#include <bpkg/utility.hxx>
#include <bpkg/package.hxx> // package_prerequisites,
// dependencies.
+#include <bpkg/package-skeleton.hxx>
#include <bpkg/pkg-configure-options.hxx>
namespace bpkg
@@ -20,35 +24,170 @@ namespace bpkg
int
pkg_configure (const pkg_configure_options&, cli::scanner& args);
- // Note: all of the following functions expect the package dependency
- // constraints to be complete.
+ // Configure a system package and commit the transaction.
+ //
+ shared_ptr<selected_package>
+ pkg_configure_system (const package_name&,
+ const version&,
+ database&,
+ transaction&);
+
+ // The custom search function. If specified, it is called by pkg_configure()
+ // to obtain the database to search for the prerequisite in, instead of
+ // searching for it in the linked databases, recursively. If the function
+ // returns NULL, then fallback to the recursive search through the linked
+ // databases.
+ //
+ using find_database_function = database* (database&,
+ const package_name&,
+ bool buildtime);
+
+ // Given dependencies of a package, return its prerequisite packages,
+ // 1-based indexes of the selected dependency alternatives (0 for toolchain
+ // build-time dependencies, etc), configuration variables that resulted from
+ // selection of these prerequisites (import, reflection, etc), and sources
+ // of the configuration variables resulted from evaluating the reflect
+ // clauses. Fail if for some of the dependency alternative lists there is no
+ // satisfactory alternative (all its dependencies are configured, satisfy
+ // the respective constraints, etc).
+ //
+ // The package dependency constraints are expected to be complete.
+ //
+ // The dependencies argument may contain pre-selected dependency
+ // alternatives (with the potential empty entries for the toolchain
+ // build-time dependencies or for dependencies with all the alternatives
+ // disabled; see pkg-build for the use-case). In this case the number of
+ // dependency alternatives for each dependency must be 1 (or 0) and the
+ // alternatives argument must be specified. The alternatives argument must
+ // be parallel to the dependencies argument and specify indexes of the
+ // selected alternatives.
+ //
+ // If the dependency alternatives are not pre-selected (alternatives ==
+ // NULL), then for each depends value select the first satisfactory
+ // alternative encountered. If, however, prerequisites corresponding to the
+ // previous configured state of the package are specified
+ // (prev_prerequisites != NULL), then for each depends value try to select
+ // an alternative where dependencies all belong to this list (the "recreate
+ // dependency decisions" mode). Failed that, select an alternative as if no
+ // prerequisites are specified (the "make dependency decisions" mode).
+ //
+ // Note that there are actually 3 possible use cases for
+ // pkg_configure_prerequisites():
+ //
+ // - The package is being configured manually. In this case its dependency
+ // alternatives are not pre-selected and there is no information about its
+ // previous configured state (alternatives == NULL, prev_prerequisites ==
+ // NULL).
+ //
+ // - The package is being built, upgraded, or re-evaluated. In this case its
+ // dependency alternatives are pre-selected, their enable, prefer, and
+ // require clauses are evaluated, and there is no need in the previous
+ // configured state information (alternatives != NULL, prev_prerequisites
+ // == NULL).
+ //
+ // - The package is being reconfigured for a reason other than any of the
+ // mentioned above (dependency up/down-grade/reconfiguration, deorphaning,
+ // pkg-build --disfigure is specified, etc). In this case its dependency
+ // alternatives are not pre-selected but the previous configured state
+ // information is provided (alternatives == NULL, prev_prerequisites !=
+ // NULL).
+ //
+ // - There are no use cases when both dependency alternatives are
+ // pre-selected and the previous configured state information needs to be
+ // provided. Thus, alternatives and prev_prerequisites must never be both
+ // NULL.
+ //
+ // Optionally, remove constraints from the specified dependencies
+ // (unconstrain_deps). Only allowed in the simulation mode.
+ //
+ struct configure_prerequisites_result
+ {
+ package_prerequisites prerequisites;
+ vector<size_t> dependency_alternatives;
+ strings config_variables; // Note: name and value.
+
+ // Only contains sources of configuration variables collected using the
+ // package skeleton, excluding those user-specified variables which are
+ // not the project variables for the specified package (module
+ // configuration variables, etc). Thus, it is not parallel to the
+ // config_variables member.
+ //
+ vector<config_variable> config_sources; // Note: name and source.
+
+ // SHA256 checksum of variables (names and values) referred to by the
+ // config_sources member.
+ //
+ string config_checksum;
+ };
+
+ // Return the "would be" state for packages that would be configured
+ // by this stage.
+ //
+ using find_package_state_function =
+ optional<pair<package_state, package_substate>> (
+ const shared_ptr<selected_package>&);
+
+ // Note: loads selected packages.
+ //
+ configure_prerequisites_result
+ pkg_configure_prerequisites (
+ const common_options&,
+ database&,
+ transaction&,
+ const dependencies&,
+ const vector<size_t>* alternatives,
+ package_skeleton&&,
+ const vector<package_name>* prev_prerequisites,
+ bool simulate,
+ const function<find_database_function>&,
+ const function<find_package_state_function>&,
+ const vector<package_key>* unconstrain_deps = nullptr);
// Configure the package, update its state, and commit the transaction.
//
+ // This is a lower-level version meant for sharing the same build context
+ // to configure multiple packages (in the dependency order).
+ //
+ // Note: variable_overrides must include config.config.disfigure, if
+ // required.
+ //
+ // Note: expects all the non-external packages to be configured to be
+ // already unpacked (for subproject discovery).
+ //
void
- pkg_configure (const dir_path& configuration,
- const common_options&,
+ pkg_configure (const common_options&,
+ database&,
transaction&,
const shared_ptr<selected_package>&,
- const dependencies&,
- const strings& config_vars,
+ configure_prerequisites_result&&,
+ const unique_ptr<build2::context>&,
+ const build2::variable_overrides&,
bool simulate);
- // Configure a system package and commit the transaction.
+ // Create a build context suitable for configuring packages.
//
- shared_ptr<selected_package>
- pkg_configure_system (const package_name&, const version&, transaction&);
-
- // Return package prerequisites given its dependencies. Fail if some of the
- // prerequisites are not configured or don't satisfy the package's
- // dependency constraints. Note that the package argument is used for
- // diagnostics only.
- //
- package_prerequisites
- pkg_configure_prerequisites (const common_options&,
- transaction&,
- const dependencies&,
- const package_name&);
+ unique_ptr<build2::context>
+ pkg_configure_context (
+ const common_options&,
+ strings&& cmd_vars,
+ const function<build2::context::var_override_function>& = nullptr);
+
+ // This is a higher-level version meant for configuring a single package.
+ //
+ // Note: loads selected packages.
+ //
+ void
+ pkg_configure (const common_options&,
+ database&,
+ transaction&,
+ const shared_ptr<selected_package>&,
+ const dependencies&,
+ const vector<size_t>* alternatives,
+ package_skeleton&&,
+ const vector<package_name>* prev_prerequisites,
+ bool disfigured,
+ bool simulate,
+ const function<find_database_function>& = {});
}
#endif // BPKG_PKG_CONFIGURE_HXX
diff --git a/bpkg/pkg-disfigure.cli b/bpkg/pkg-disfigure.cli
index 9f6c63b..0491a16 100644
--- a/bpkg/pkg-disfigure.cli
+++ b/bpkg/pkg-disfigure.cli
@@ -23,9 +23,11 @@ namespace bpkg
source code package is returned to the \cb{unpacked} state. A system
package is removed from the configuration.
- By default \cb{pkg-disfigure} will also clean the package's output
- directory. This behavior can be suppressed with the \cb{--keep-out}
- option, for example, if the package is to be reconfigured."
+ By default \cb{pkg-disfigure} will remove the package's build system
+ configuration (\cb{config.build}) and also clean its output directory.
+ This behavior can be suppressed with the \cb{--keep-config} and
+ \cb{--keep-out} options, respectively, for example, if the package is
+ to be reconfigured."
}
class pkg_disfigure_options: configuration_options
@@ -36,6 +38,12 @@ namespace bpkg
{
"Don't clean the package's output directory."
}
+
+ bool --keep-config
+ {
+ "Don't remove the package's build system configuration
+ (\cb{config.build})."
+ }
};
"
diff --git a/bpkg/pkg-disfigure.cxx b/bpkg/pkg-disfigure.cxx
index 9347bbc..2239314 100644
--- a/bpkg/pkg-disfigure.cxx
+++ b/bpkg/pkg-disfigure.cxx
@@ -15,11 +15,12 @@ using namespace butl;
namespace bpkg
{
void
- pkg_disfigure (const dir_path& c,
- const common_options& o,
+ pkg_disfigure (const common_options& o,
+ database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool clean,
+ bool disfigure,
bool simulate)
{
assert (p->state == package_state::configured ||
@@ -29,28 +30,30 @@ namespace bpkg
l4 ([&]{trace << *p;});
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check that we have no dependents.
//
if (p->state == package_state::configured)
{
- using query = query<package_dependent>;
-
- auto r (db.query<package_dependent> (query::name == p->name));
-
- if (!r.empty ())
+ diag_record dr;
+ for (database& ddb: db.dependent_configs ())
{
- diag_record dr;
- dr << fail << "package " << p->name << " still has dependents:";
+ auto r (query_dependents (ddb, p->name, db));
- for (const package_dependent& pd: r)
+ if (!r.empty ())
{
- dr << info << "package " << pd.name;
+ if (dr.empty ())
+ dr << fail << "package " << p->name << db << " still has "
+ << "dependents:";
+
+ for (const package_dependent& pd: r)
+ {
+ dr << info << "package " << pd.name << ddb;
- if (pd.constraint)
- dr << " on " << p->name << " " << *pd.constraint;
+ if (pd.constraint)
+ dr << " on " << p->name << " " << *pd.constraint;
+ }
}
}
}
@@ -69,14 +72,19 @@ namespace bpkg
// Since we are no longer configured, clear the prerequisites list.
//
p->prerequisites.clear ();
+ p->dependency_alternatives.clear ();
+
+ // Mark the section as loaded, so dependency alternatives are updated.
+ //
+ p->dependency_alternatives_section.load ();
assert (p->src_root); // Must be set since unpacked.
assert (p->out_root); // Must be set since configured.
if (!simulate)
{
- dir_path src_root (p->effective_src_root (c));
- dir_path out_root (p->effective_out_root (c));
+ dir_path src_root (p->effective_src_root (db.config_orig));
+ dir_path out_root (p->effective_out_root (db.config_orig));
l4 ([&]{trace << "src_root: " << src_root << ", "
<< "out_root: " << out_root;});
@@ -92,9 +100,13 @@ namespace bpkg
if (p->state == package_state::configured)
{
if (clean)
- bspec = "clean('" + rep + "') ";
+ bspec = "clean('" + rep + "')";
- bspec += "disfigure('" + rep + "')";
+ if (disfigure)
+ {
+ bspec += (bspec.empty () ? "" : " ");
+ bspec += "disfigure('" + rep + "')";
+ }
}
else
{
@@ -105,69 +117,77 @@ namespace bpkg
if (src_root == out_root)
bspec = "disfigure('" + rep + "')";
else
- bspec = "disfigure('" + src_root.representation () + "'@'" +
- rep + "')";
- }
+ bspec = "disfigure('" + src_root.representation () + "'@'" + rep +
+ "')";
- l4 ([&]{trace << "buildspec: " << bspec;});
+ disfigure = true; // Make sure the flag matches the behavior.
+ }
- // Disfigure.
+ // Clean and/or disfigure.
//
+ if (!bspec.empty () && exists (out_root))
try
{
- if (exists (out_root))
+ l4 ([&]{trace << "buildspec: " << bspec;});
+
+ // Note that for external packages out_root is only the output
+ // directory. It is also possible that the buildfiles in the source
+ // directory have changed in a way that they don't clean everything.
+ // So in this case we just remove the output directory manually rather
+ // then running 'b clean disfigure'.
+ //
+ // It may also happen that we cannot disfigure the external package'
+ // output directory (the source directory have moved, etc.). If that's
+ // the case, then we fallback to the output directory removal.
+ //
+ if (p->external ())
{
- // Note that for external packages this is just the output
- // directory. It is also possible that the buildfiles in the source
- // directory have changed in a way that they don't clean everything.
- // So in this case we just remove the output directory manually
- // rather then running 'b clean disfigure'.
+ // clean disfigure
//
- // It may also happen that we can not disfigure the external
- // package' output directory (the source directory have moved, etc.).
- // If that's the case, then we fallback to the output directory
- // removal.
+ // true true -- wipe the directory
+ // true false -- try to clean, ignore if failed
+ // false true -- try to disfigure, fallback to wipe if failed
+ // false false -- never get here (bspec is empty)
//
- if (p->external ())
+
+ if (!clean || !disfigure)
{
- if (!clean)
+ auto_fd dev_null (open_null ());
+
+ // Redirect stderr to /dev/null. Note that we don't expect
+ // anything to be written to stdout.
+ //
+ process pr (start_b (o,
+ 1 /* stdout */,
+ dev_null /* stderr */,
+ verb_b::quiet,
+ bspec));
+
+ // If the disfigure meta-operation failed then we report the
+ // abnormal termination and fallback to the output directory
+ // removal otherwise.
+ //
+ if (!pr.wait ())
{
- auto_fd dev_null (open_null ());
-
- // Redirect stderr to /dev/null. Note that we don't expect
- // anything to be written to stdout.
- //
- process pr (start_b (o,
- 1 /* stdout */, dev_null /* stderr */,
- verb_b::quiet,
- bspec));
-
- // If the disfigure meta-operation failed then we report the
- // abnormal termination and fallback to the output directory
- // removal otherwise.
- //
- if (!pr.wait ())
- {
- const process_exit& e (*pr.exit);
-
- if (!e.normal ())
- fail << "process " << name_b (o) << " " << e;
-
- clean = true;
- }
- }
+ const process_exit& e (*pr.exit);
+
+ if (!e.normal ())
+ fail << "process " << name_b (o) << " " << e;
- if (clean)
- rm_r (out_root);
+ clean = true;
+ }
}
- else
- run_b (o, verb_b::quiet, bspec);
+
+ if (clean && disfigure)
+ rm_r (out_root);
}
+ else
+ run_b (o, verb_b::quiet, bspec);
// Make sure the out directory is gone unless it is the same as src,
- // or we didn't clean it.
+ // or we didn't clean or disfigure it.
//
- if (out_root != src_root && clean && exists (out_root))
+ if (out_root != src_root && clean && disfigure && exists (out_root))
fail << "package output directory " << out_root << " still exists";
}
catch (const failed&)
@@ -179,10 +199,16 @@ namespace bpkg
db.update (p);
t.commit ();
- info << "package " << p->name << " is now broken; "
+ info << "package " << p->name << db << " is now broken; "
<< "use 'pkg-purge' to remove";
throw;
}
+
+ if (disfigure)
+ {
+ p->config_variables.clear ();
+ p->config_checksum.clear ();
+ }
}
p->out_root = nullopt;
@@ -207,7 +233,7 @@ namespace bpkg
package_name n (parse_package_name (args.next (),
false /* allow_version */));
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -221,7 +247,11 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, !o.keep_out (), false /* simulate */);
+ pkg_disfigure (o, db, t,
+ p,
+ !o.keep_out () /* clean */,
+ !o.keep_config () /* disfigure */,
+ false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
diff --git a/bpkg/pkg-disfigure.hxx b/bpkg/pkg-disfigure.hxx
index 5121050..fab56a0 100644
--- a/bpkg/pkg-disfigure.hxx
+++ b/bpkg/pkg-disfigure.hxx
@@ -15,18 +15,19 @@ namespace bpkg
int
pkg_disfigure (const pkg_disfigure_options&, cli::scanner& args);
- // Disfigure the package, update its state, and commit the
- // transaction. If the package state is broken, then this
- // is taken to mean it hasn't been successfully configured
- // and no clean prior to disfigure is necessary (or possible,
- // for that matter).
+ // Disfigure the package, update its state, and commit the transaction. If
+ // the package state is broken, then this is taken to mean it hasn't been
+ // successfully configured and no clean prior to disfigure is necessary (or
+ // possible, for that matter). If disfigure is false, then don't actually
+ // disfigure the package in the build system sense.
//
void
- pkg_disfigure (const dir_path& configuration,
- const common_options&,
+ pkg_disfigure (const common_options&,
+ database&,
transaction&,
const shared_ptr<selected_package>&,
bool clean,
+ bool disfigure,
bool simulate);
}
diff --git a/bpkg/pkg-drop.cli b/bpkg/pkg-drop.cli
index ac282d6..75ee04d 100644
--- a/bpkg/pkg-drop.cli
+++ b/bpkg/pkg-drop.cli
@@ -14,17 +14,21 @@ namespace bpkg
"\h|SYNOPSIS|
- \c{\b{bpkg pkg-drop}|\b{drop} [<options>] <pkg>...}
+ \c{\b{bpkg pkg-drop}|\b{drop} [<options>] <pkg>...\n
+ \b{bpkg pkg-drop}|\b{drop} [<options>] \b{--all}|\b{-a}\n
+ \b{bpkg pkg-drop}|\b{drop} [<options>] (\b{--all-pattern} <pattern>)...}
\h|DESCRIPTION|
- The \cb{pkg-drop} command drops one or more packages from the
- configuration. If the packages being dropped still have dependent
- packages, then those will have to be dropped as well and you will be
- prompted to confirm. And if the packages being dropped have dependency
- packages that would otherwise no longer be used, then they will be
- dropped as well unless the \c{\b{--keep-unused}|\b{-K}} option is
- specified.
+ The \cb{pkg-drop} command drops from the configuration the specified
+ packages (the first form), all the held packages (the second form, see
+ \l{bpkg-pkg-status(1)}), or all the held packages that match any of the
+ specified wildcard patterns (the third form). If the packages being
+ dropped still have dependent packages, then those will have to be dropped
+ as well and you will be prompted to confirm. And if the packages being
+ dropped have dependency packages that would otherwise no longer be used,
+ then they will be dropped as well unless the \c{\b{--keep-unused}|\b{-K}}
+ option is specified.
The \cb{pkg-drop} command also supports several options (described below)
that allow you to control the amount of work that will be done."
@@ -34,6 +38,19 @@ namespace bpkg
{
"\h|PKG-DROP OPTIONS|"
+ bool --all|-a
+ {
+ "Drop all held packages."
+ }
+
+ strings --all-pattern
+ {
+ "<pattern>",
+ "Drop held packages that match the specified wildcard pattern. Repeat
+ this option to match multiple patterns. Note that you may need to quote
+ the pattern to prevent expansion by your shell."
+ }
+
bool --yes|-y
{
"Assume the answer to all prompts is \cb{yes}. Note that this option
@@ -64,6 +81,13 @@ namespace bpkg
"Issue an error if attempting to drop dependent packages."
}
+ uint16_t --dependent-exit
+ {
+ "<code>",
+ "Silently exit with the specified error code if attempting to drop
+ dependent packages."
+ }
+
bool --disfigure-only
{
"Disfigure all the packages but don't purge."
diff --git a/bpkg/pkg-drop.cxx b/bpkg/pkg-drop.cxx
index 6ea6769..d8fa4ea 100644
--- a/bpkg/pkg-drop.cxx
+++ b/bpkg/pkg-drop.cxx
@@ -7,6 +7,8 @@
#include <list>
#include <iostream> // cout
+#include <libbutl/path-pattern.hxx>
+
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
@@ -33,6 +35,7 @@ namespace bpkg
struct drop_package
{
+ database& db;
shared_ptr<selected_package> package;
drop_reason reason;
};
@@ -41,45 +44,50 @@ namespace bpkg
//
struct dependent_name
{
+ database& db;
package_name name;
+ database& prq_db;
package_name prq_name; // Prerequisite package name.
};
using dependent_names = vector<dependent_name>;
- // A "dependency-ordered" list of packages and their prerequisites.
- // That is, every package on the list only possibly depending on the
- // ones after it. In a nutshell, the usage is as follows: we first add
- // the packages specified by the user (the "user selection"). We then
- // collect all the dependent packages of the user selection, if any.
- // These will either have to be dropped as well or we cannot continue.
- // If the user gave the go ahead to drop the dependents, then, for our
- // purposes, this list of dependents can from now own be treated as if
- // it was a part of the user selection. The next step is to collect all
- // the non-held prerequisites of the user selection with the goal of
- // figuring out which ones will no longer be needed and offering to
- // drop them as well. This part is a bit tricky and has to be done in
- // three steps: We first collect all the prerequisites that we could
- // possibly be dropping. We then order all the packages. And, finally,
- // we filter out prerequisites that we cannot drop. See the comment to
- // the call to collect_prerequisites() for details on why it has to be
- // done this way.
+ // A "dependency-ordered" list of packages and their prerequisites. That is,
+ // every package on the list only possibly depending on the ones after it.
+ // In a nutshell, the usage is as follows: we first add the packages
+ // specified by the user (the "user selection"). We then collect all the
+ // dependent packages of the user selection, if any. These will either have
+ // to be dropped as well or we cannot continue and need to either issue
+ // diagnostics and fail or exit with the specified (via --dependent-exit)
+ // code. If the user gave the go ahead to drop the dependents, then, for our
+ // purposes, this list of dependents can from now on be treated as if it
+ // was a part of the user selection. The next step is to collect all the
+ // non-held prerequisites of the user selection with the goal of figuring
+ // out which ones will no longer be needed and offering to drop them as
+ // well. This part is a bit tricky and has to be done in three steps: We
+ // first collect all the prerequisites that we could possibly be dropping.
+ // We then order all the packages. And, finally, we filter out prerequisites
+ // that we cannot drop. See the comment to the call to
+ // collect_prerequisites() for details on why it has to be done this way.
//
struct drop_packages: list<reference_wrapper<drop_package>>
{
// Collect a package to be dropped, by default, as a user selection.
//
bool
- collect (shared_ptr<selected_package> p, drop_reason r = drop_reason::user)
+ collect (database& db,
+ shared_ptr<selected_package> p,
+ drop_reason r = drop_reason::user)
{
package_name n (p->name); // Because of move(p) below.
- return map_.emplace (move (n), data_type {end (), {move (p), r}}).second;
+ return map_.emplace (package_key {db, move (n)},
+ data_type {end (), {db, move (p), r}}).second;
}
- // Collect all the dependets of the user selection returning the list
+ // Collect all the dependents of the user selection returning the list
// of their names. Dependents of dependents are collected recursively.
//
dependent_names
- collect_dependents (database& db)
+ collect_dependents ()
{
dependent_names dns;
@@ -91,7 +99,7 @@ namespace bpkg
//
if (dp.reason != drop_reason::dependent &&
dp.package->state == package_state::configured)
- collect_dependents (db, dns, dp.package);
+ collect_dependents (pr.first.db, dp.package, dns);
}
return dns;
@@ -99,21 +107,22 @@ namespace bpkg
void
collect_dependents (database& db,
- dependent_names& dns,
- const shared_ptr<selected_package>& p)
+ const shared_ptr<selected_package>& p,
+ dependent_names& dns)
{
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == p->name))
+ for (database& ddb: db.dependent_configs ())
{
- const package_name& dn (pd.name);
-
- if (map_.find (dn) == map_.end ())
+ for (auto& pd: query_dependents_cache (ddb, p->name, db))
{
- shared_ptr<selected_package> dp (db.load<selected_package> (dn));
- dns.push_back (dependent_name {dn, p->name});
- collect (dp, drop_reason::dependent);
- collect_dependents (db, dns, dp);
+ const package_name& dn (pd.name);
+
+ if (map_.find (ddb, dn) == map_.end ())
+ {
+ shared_ptr<selected_package> dp (ddb.load<selected_package> (dn));
+ dns.push_back (dependent_name {ddb, dn, db, p->name});
+ collect (ddb, dp, drop_reason::dependent);
+ collect_dependents (ddb, dp, dns);
+ }
}
}
}
@@ -123,7 +132,7 @@ namespace bpkg
// are collected recursively.
//
bool
- collect_prerequisites (database& db)
+ collect_prerequisites ()
{
bool r (false);
@@ -136,29 +145,30 @@ namespace bpkg
if ((dp.reason == drop_reason::user ||
dp.reason == drop_reason::dependent) &&
dp.package->state == package_state::configured)
- r = collect_prerequisites (db, dp.package) || r;
+ r = collect_prerequisites (dp.package) || r;
}
return r;
}
bool
- collect_prerequisites (database& db, const shared_ptr<selected_package>& p)
+ collect_prerequisites (const shared_ptr<selected_package>& p)
{
bool r (false);
for (const auto& pair: p->prerequisites)
{
const lazy_shared_ptr<selected_package>& lpp (pair.first);
+ database& pdb (lpp.database ());
- if (map_.find (lpp.object_id ()) == map_.end ())
+ if (map_.find (pdb, lpp.object_id ()) == map_.end ())
{
shared_ptr<selected_package> pp (lpp.load ());
if (!pp->hold_package) // Prune held packages.
{
- collect (pp, drop_reason::prerequisite);
- collect_prerequisites (db, pp);
+ collect (pdb, pp, drop_reason::prerequisite);
+ collect_prerequisites (pp);
r = true;
}
}
@@ -171,11 +181,11 @@ namespace bpkg
// returning its positions.
//
iterator
- order (const package_name& name)
+ order (database& db, const package_name& name)
{
// Every package that we order should have already been collected.
//
- auto mi (map_.find (name));
+ auto mi (map_.find (db, name));
assert (mi != map_.end ());
// If this package is already in the list, then that would also
@@ -214,13 +224,14 @@ namespace bpkg
{
for (const auto& pair: p->prerequisites)
{
+ database& pdb (pair.first.database ());
const package_name& pn (pair.first.object_id ());
// The prerequisites may not necessarily be in the map (e.g.,
// a held package that we prunned).
//
- if (map_.find (pn) != map_.end ())
- update (order (pn));
+ if (map_.find (pdb, pn) != map_.end ())
+ update (order (pdb, pn));
}
}
@@ -231,7 +242,7 @@ namespace bpkg
// true if any remain.
//
bool
- filter_prerequisites (database& db)
+ filter_prerequisites ()
{
bool r (false);
@@ -244,27 +255,32 @@ namespace bpkg
if (dp.reason == drop_reason::prerequisite)
{
const shared_ptr<selected_package>& p (dp.package);
+ database& db (dp.db);
bool keep (true);
// Get our dependents (which, BTW, could only have been before us
// on the list). If they are all in the map, then we can be dropped.
//
- using query = query<package_dependent>;
-
- for (auto& pd: db.query<package_dependent> (query::name == p->name))
+ for (database& ddb: db.dependent_configs ())
{
- if (map_.find (pd.name) == map_.end ())
+ for (auto& pd: query_dependents (ddb, p->name, db))
{
- keep = false;
- break;
+ if (map_.find (ddb, pd.name) == map_.end ())
+ {
+ keep = false;
+ break;
+ }
}
+
+ if (!keep)
+ break;
}
if (!keep)
{
i = erase (i);
- map_.erase (p->name);
+ map_.erase (package_key {db, p->name});
continue;
}
@@ -284,15 +300,24 @@ namespace bpkg
drop_package package;
};
- map<package_name, data_type> map_;
+ class package_map: public map<package_key, data_type>
+ {
+ public:
+ using base_type = map<package_key, data_type>;
+
+ iterator
+ find (database& db, const package_name& pn)
+ {
+ return base_type::find (package_key {db, pn});
+ }
+ };
+ package_map map_;
};
// Drop ordered list of packages.
//
static int
- pkg_drop (const dir_path& c,
- const pkg_drop_options& o,
- database& db,
+ pkg_drop (const pkg_drop_options& o,
const drop_packages& pkgs,
bool drop_prq,
bool need_prompt)
@@ -330,11 +355,11 @@ namespace bpkg
}
if (o.print_only ())
- cout << "drop " << p->name << endl;
+ cout << "drop " << p->name << dp.db << endl;
else if (verb)
// Print indented for better visual separation.
//
- text << " drop " << p->name;
+ text << " drop " << p->name << dp.db;
}
if (o.print_only ())
@@ -347,24 +372,50 @@ namespace bpkg
!(o.yes () || !need_prompt || yn_prompt ("continue? [Y/n]", 'y')))
return 1;
+ bool result (verb && !o.no_result ());
+ bool progress (!result &&
+ ((verb == 1 && !o.no_progress () && stderr_term) ||
+ o.progress ()));
+
+ size_t prog_i, prog_n, prog_percent;
+
// All that's left to do is first disfigure configured packages and
// then purge all of them. We do both left to right (i.e., from more
// dependent to less dependent). For disfigure this order is required.
// For purge, it will be the order closest to the one specified by the
// user.
//
- for (const drop_package& dp: pkgs)
+ // Note: similar code in pkg-build.
+ //
+ auto disfigure_pred = [drop_prq] (const drop_package& dp)
{
// Skip prerequisites if we weren't instructed to drop them.
//
if (dp.reason == drop_reason::prerequisite && !drop_prq)
- continue;
+ return false;
- const shared_ptr<selected_package>& p (dp.package);
+ if (dp.package->state != package_state::configured)
+ return false;
+
+ return true;
+ };
+
+ if (progress)
+ {
+ prog_i = 0;
+ prog_n = static_cast<size_t> (count_if (pkgs.begin (), pkgs.end (),
+ disfigure_pred));
+ prog_percent = 100;
+ }
- if (p->state != package_state::configured)
+ for (const drop_package& dp: pkgs)
+ {
+ if (!disfigure_pred (dp))
continue;
+ database& db (dp.db);
+ const shared_ptr<selected_package>& p (dp.package);
+
// Each package is disfigured in its own transaction, so that we always
// leave the configuration in a valid state.
//
@@ -372,15 +423,46 @@ namespace bpkg
// Commits the transaction.
//
- pkg_disfigure (c, o, t, p, true /* clean */, false /* simulate */);
+ pkg_disfigure (o, db, t,
+ p,
+ true /* clean */,
+ true /* disfigure */,
+ false /* simulate */);
assert (p->state == package_state::unpacked ||
p->state == package_state::transient);
- if (verb && !o.no_result ())
- text << (p->state == package_state::transient
- ? "purged "
- : "disfigured ") << p->name;
+ if (result || progress)
+ {
+ const char* what (p->state == package_state::transient
+ ? "purged"
+ : "disfigured");
+ if (result)
+ text << what << ' ' << p->name << db;
+ else if (progress)
+ {
+ size_t p ((++prog_i * 100) / prog_n);
+
+ if (prog_percent != p)
+ {
+ prog_percent = p;
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += "% of packages ";
+ diag_progress += what;
+ }
+ }
+ }
+ }
+
+ // Clear the progress if shown.
+ //
+ if (progress)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
}
if (o.disfigure_only ())
@@ -403,14 +485,16 @@ namespace bpkg
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
+ database& db (dp.db);
+
transaction t (db);
// Commits the transaction, p is now transient.
//
- pkg_purge (c, t, p, false /* simulate */);
+ pkg_purge (db, t, p, false /* simulate */);
- if (verb && !o.no_result ())
- text << "purged " << p->name;
+ if (result)
+ text << "purged " << p->name << db;
}
return 0;
@@ -424,19 +508,52 @@ namespace bpkg
const dir_path& c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- if (o.yes () && o.no ())
- fail << "both --yes|-y and --no|-n specified";
+ {
+ diag_record dr;
+
+ if (o.yes () && o.no ())
+ {
+ dr << fail << "both --yes|-y and --no|-n specified";
+ }
+ else if (o.drop_dependent () && o.keep_dependent ())
+ {
+ dr << fail << "both --drop-dependent and --keep-dependent|-K "
+ << "specified";
+ }
+ else if (o.drop_dependent () && o.dependent_exit_specified ())
+ {
+ dr << fail << "both --drop-dependent and --dependent-exit "
+ << "specified";
+ }
+ else if (o.keep_dependent () && o.dependent_exit_specified ())
+ {
+ dr << fail << "both --keep-dependent|-K and --dependent-exit "
+ << "specified";
+ }
+ else if (o.all ())
+ {
+ if (o.all_pattern_specified ())
+ dr << fail << "both --all|-a and --all-pattern specified";
+
+ if (args.more ())
+ dr << fail << "both --all|-a and package argument specified";
+ }
+ else if (o.all_pattern_specified ())
+ {
+ if (args.more ())
+ dr << fail << "both --all-pattern and package argument specified";
+ }
+ else if (!args.more ())
+ {
+ dr << fail << "package name argument expected";
+ }
- if (o.drop_dependent () && o.keep_dependent ())
- fail << "both --drop-dependent and --keep-dependent|-K "
- << "specified" <<
- info << "run 'bpkg help pkg-drop' for more information";
+ if (!dr.empty ())
+ dr << info << "run 'bpkg help pkg-drop' for more information";
+ }
- if (!args.more ())
- fail << "package name argument expected" <<
- info << "run 'bpkg help pkg-drop' for more information";
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
// Note that the session spans all our transactions. The idea here is
// that drop_package objects in the drop_packages list below will be
@@ -464,33 +581,79 @@ namespace bpkg
// by the user.
//
vector<package_name> names;
- while (args.more ())
- {
- package_name n (parse_package_name (args.next (),
- false /* allow_version */));
-
- l4 ([&]{trace << "package " << n;});
-
- shared_ptr<selected_package> p (db.find<selected_package> (n));
- if (p == nullptr)
- fail << "package " << n << " does not exist in configuration " << c;
+ auto add = [&names, &pkgs, &db] (shared_ptr<selected_package>&& p)
+ {
+ package_name n (p->name);
if (p->state == package_state::broken)
fail << "unable to drop broken package " << n <<
info << "use 'pkg-purge --force' to remove";
- if (pkgs.collect (move (p)))
+ if (pkgs.collect (db, move (p)))
names.push_back (move (n));
+ };
+
+ if (o.all () || o.all_pattern_specified ())
+ {
+ using query = query<selected_package>;
+
+ for (shared_ptr<selected_package> p:
+ pointer_result (
+ db.query<selected_package> (query::hold_package)))
+ {
+ l4 ([&]{trace << *p;});
+
+ if (o.all_pattern_specified ())
+ {
+ for (const string& pat: o.all_pattern ())
+ {
+ if (path_match (p->name.string (), pat))
+ {
+ add (move (p));
+ break;
+ }
+ }
+ }
+ else // --all
+ add (move (p));
+ }
+
+ if (names.empty ())
+ info << "nothing to drop";
+ }
+ else
+ {
+ while (args.more ())
+ {
+ package_name n (parse_package_name (args.next (),
+ false /* allow_version */));
+
+ l4 ([&]{trace << "package " << n;});
+
+ shared_ptr<selected_package> p (db.find<selected_package> (n));
+
+ if (p == nullptr)
+ fail << "package " << n << " does not exist in configuration " << c;
+
+ add (move (p));
+ }
}
// The next step is to see if there are any dependents that are not
// already on the list. We will either have to drop those as well or
- // abort.
+ // issue diagnostics and fail or silently indicate that with an exit
+ // code.
//
- dependent_names dnames (pkgs.collect_dependents (db));
+ dependent_names dnames (pkgs.collect_dependents ());
if (!dnames.empty () && !o.drop_dependent ())
{
+ if (o.dependent_exit_specified ())
+ {
+ t.commit ();
+ return o.dependent_exit ();
+ }
+
{
diag_record dr;
@@ -503,7 +666,8 @@ namespace bpkg
<< "as well:";
for (const dependent_name& dn: dnames)
- dr << text << dn.name << " (requires " << dn.prq_name << ")";
+ dr << text << dn.name << dn.db << " (requires " << dn.prq_name
+ << dn.prq_db << ")";
}
if (o.yes ())
@@ -526,7 +690,7 @@ namespace bpkg
// on the latter and, if that's the case and "more" cannot be dropped,
// then neither can "less".
//
- pkgs.collect_prerequisites (db);
+ pkgs.collect_prerequisites ();
// Now that we have collected all the packages we could possibly be
// dropping, arrange them in the "dependency order", that is, with
@@ -540,17 +704,17 @@ namespace bpkg
// on which it depends.
//
for (const package_name& n: names)
- pkgs.order (n);
+ pkgs.order (db, n);
for (const dependent_name& dn: dnames)
- pkgs.order (dn.name);
+ pkgs.order (dn.db, dn.name);
// Filter out prerequisites that we cannot possibly drop (e.g., they
// have dependents other than the ones we are dropping). If there are
// some that we can drop, ask the user for confirmation.
//
- if (pkgs.filter_prerequisites (db) &&
- !o.keep_unused () &&
+ if (pkgs.filter_prerequisites () &&
+ !o.keep_unused () &&
!(drop_prq = o.yes ()) && !o.no ())
{
{
@@ -563,7 +727,7 @@ namespace bpkg
{
if (dp.reason == drop_reason::prerequisite)
dr << text << (dp.package->system () ? "sys:" : "")
- << dp.package->name;
+ << dp.package->name << dp.db;
}
}
@@ -576,6 +740,6 @@ namespace bpkg
t.commit ();
}
- return pkg_drop (c, o, db, pkgs, drop_prq, need_prompt);
+ return pkg_drop (o, pkgs, drop_prq, need_prompt);
}
}
diff --git a/bpkg/pkg-fetch.cxx b/bpkg/pkg-fetch.cxx
index 24883c5..837c968 100644
--- a/bpkg/pkg-fetch.cxx
+++ b/bpkg/pkg-fetch.cxx
@@ -10,6 +10,7 @@
#include <bpkg/package-odb.hxx>
#include <bpkg/checksum.hxx>
#include <bpkg/database.hxx>
+#include <bpkg/rep-mask.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/manifest-utility.hxx>
@@ -24,7 +25,7 @@ namespace bpkg
// Return the selected package object which may replace the existing one.
//
static shared_ptr<selected_package>
- pkg_fetch (dir_path c,
+ pkg_fetch (database& db,
transaction& t,
package_name n,
version v,
@@ -35,27 +36,32 @@ namespace bpkg
{
tracer trace ("pkg_fetch");
- database& db (t.database ());
tracer_guard tg (db, trace);
- // Make the archive and configuration paths absolute and normalized.
- // If the archive is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the archive path absolute and normalized. If the archive is
+ // inside the configuration, use the relative path. This way we can move
+ // the configuration around.
//
- normalize (c, "configuration");
normalize (a, "archive");
- if (a.sub (c))
- a = a.leaf (c);
-
+ // Only purge the existing archive if its path differs from the new path.
+ //
shared_ptr<selected_package> p (db.find<selected_package> (n));
+
+ bool purge_archive (p != nullptr &&
+ p->archive &&
+ p->effective_archive (db.config) != a);
+
+ if (a.sub (db.config))
+ a = a.leaf (db.config);
+
if (p != nullptr)
{
// Clean up the source directory and archive of the package we are
- // replacing. Once this is done, there is no going back. If things
- // go badly, we can't simply abort the transaction.
+ // replacing. Once this is done, there is no going back. If things go
+ // badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (db, t, p, simulate, purge_archive);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -96,6 +102,7 @@ namespace bpkg
nullopt, // No source directory yet.
false,
nullopt, // No manifest checksum.
+ nullopt, // No buildfiles checksum.
nullopt, // No output directory yet.
{}}); // No prerequisites captured yet.
@@ -113,14 +120,13 @@ namespace bpkg
// or fetching one.
//
static void
- pkg_fetch_check (const dir_path& c,
- transaction& t,
+ pkg_fetch_check (database& db,
+ transaction&,
const package_name& n,
bool replace)
{
tracer trace ("pkg_fetch_check");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (shared_ptr<selected_package> p = db.find<selected_package> (n))
@@ -131,6 +137,7 @@ namespace bpkg
if (!replace || !s)
{
diag_record dr (fail);
+ const dir_path& c (db.config_orig);
dr << "package " << n << " already exists in configuration " << c <<
info << "version: " << p->version_string ()
@@ -145,7 +152,7 @@ namespace bpkg
shared_ptr<selected_package>
pkg_fetch (const common_options& co,
- const dir_path& c,
+ database& db,
transaction& t,
path a,
bool replace,
@@ -164,18 +171,20 @@ namespace bpkg
package_manifest m (pkg_verify (co,
a,
true /* ignore_unknown */,
- false /* expand_values */));
+ false /* ignore_toolchain */,
+ false /* expand_values */,
+ false /* load_buildfiles */));
l4 ([&]{trace << m.name << " " << m.version;});
// Check/diagnose an already existing package.
//
- pkg_fetch_check (c, t, m.name, replace);
+ pkg_fetch_check (db, t, m.name, replace);
// Use the special root repository fragment as the repository fragment of
// this package.
//
- return pkg_fetch (c,
+ return pkg_fetch (db,
t,
move (m.name),
move (m.version),
@@ -187,23 +196,25 @@ namespace bpkg
shared_ptr<selected_package>
pkg_fetch (const common_options& co,
- const dir_path& c,
+ database& pdb,
+ database& rdb,
transaction& t,
package_name n,
version v,
bool replace,
bool simulate)
{
+ assert (session::has_current ());
+
tracer trace ("pkg_fetch");
- database& db (t.database ());
- tracer_guard tg (db, trace);
+ tracer_guard tg (pdb, trace); // NOTE: sets tracer for the whole cluster.
// Check/diagnose an already existing package.
//
- pkg_fetch_check (c, t, n, replace);
+ pkg_fetch_check (pdb, t, n, replace);
- check_any_available (c, t);
+ check_any_available (rdb, t);
// Note that here we compare including the revision (unlike, say in
// pkg-status). Which means one cannot just specify 1.0.0 and get 1.0.0+1
@@ -211,7 +222,7 @@ namespace bpkg
// a low-level command where some extra precision doesn't hurt.
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ rdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -223,14 +234,17 @@ namespace bpkg
for (const package_location& l: ap->locations)
{
- const repository_location& rl (l.repository_fragment.load ()->location);
-
- if (rl.archive_based () && (pl == nullptr || rl.local ()))
+ if (!rep_masked_fragment (l.repository_fragment))
{
- pl = &l;
+ const repository_location& rl (l.repository_fragment.load ()->location);
+
+ if (rl.archive_based () && (pl == nullptr || rl.local ()))
+ {
+ pl = &l;
- if (rl.local ())
- break;
+ if (rl.local ())
+ break;
+ }
}
}
@@ -243,10 +257,76 @@ namespace bpkg
<< "from " << pl->repository_fragment->name;
auto_rmfile arm;
- path a (c / pl->location.leaf ());
+ path an (pl->location.leaf ());
+ path a (pdb.config_orig / an);
+
+ // Note that in the replace mode we first fetch the new package version
+ // archive and then update the existing selected package object, dropping
+ // the previous package version archive, if present. This way we, in
+ // particular, keep the existing selected package/archive intact if the
+ // fetch operation fails. However, this approach requires to handle
+ // re-fetching (potentially from a different repository) of the same
+ // package version specially.
+ //
+ // Specifically, if we need to overwrite the package archive file, then we
+ // stash the existing archive in the temporary directory and remove it on
+ // success. On failure, we try to move the stashed archive to the original
+ // place. If that fails as well, we mark the package as broken.
+ //
+ // (If you are wondering why don't we instead always fetch into a
+ // temporary file, the answer is Windows, where moving a newly created
+ // file may not succeed because it is being scanned by Windows Defender
+ // or some such.)
+ //
+ auto_rmfile earm;
+ shared_ptr<selected_package> sp;
+
+ auto g (
+ make_exception_guard (
+ [&arm, &a, &earm, &sp, &pdb, &t] ()
+ {
+ // Restore stashed archive.
+ //
+ if (!earm.path.empty () && exists (earm.path))
+ {
+ if (mv (earm.path, a, true /* ignore_error */))
+ {
+ earm.cancel ();
+ arm.cancel (); // Note: may not be armed yet, which is ok.
+ }
+ //
+ // Note: can already be marked as broken by pkg_purge_fs().
+ //
+ else if (sp->state != package_state::broken)
+ {
+ sp->state = package_state::broken;
+ pdb.update (sp);
+ t.commit ();
+
+ // Here we assume that mv() has already issued the diagnostics.
+ //
+ info << "package " << sp->name << pdb << " is now broken; "
+ << "use 'pkg-purge --force' to remove";
+ }
+ }
+ }));
if (!simulate)
{
+ // Stash the existing package archive if it needs to be overwritten (see
+ // above for details).
+ //
+ // Note: compare the archive absolute paths.
+ //
+ if (replace &&
+ (sp = pdb.find<selected_package> (n)) != nullptr &&
+ sp->archive &&
+ sp->effective_archive (pdb.config) == pdb.config / an)
+ {
+ earm = tmp_file (pdb.config_orig, n.string () + '-' + v.string ());
+ mv (a, earm.path);
+ }
+
pkg_fetch_archive (
co, pl->repository_fragment->location, pl->location, a);
@@ -256,20 +336,20 @@ namespace bpkg
//
assert (ap->sha256sum);
- const string& sha256sum (sha256 (co, a));
- if (sha256sum != *ap->sha256sum)
+ const string& cs (sha256sum (co, a));
+ if (cs != *ap->sha256sum)
{
fail << "checksum mismatch for " << n << " " << v <<
info << pl->repository_fragment->name << " has " << *ap->sha256sum <<
- info << "fetched archive has " << sha256sum <<
+ info << "fetched archive has " << cs <<
info << "consider re-fetching package list and trying again" <<
info << "if problem persists, consider reporting this to "
- << "the repository maintainer";
+ << "the repository maintainer";
}
}
shared_ptr<selected_package> p (
- pkg_fetch (c,
+ pkg_fetch (pdb,
t,
move (n),
move (v),
@@ -290,7 +370,7 @@ namespace bpkg
dir_path c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
@@ -305,7 +385,7 @@ namespace bpkg
info << "run 'bpkg help pkg-fetch' for more information";
p = pkg_fetch (o,
- c,
+ db,
t,
path (args.next ()),
o.replace (),
@@ -327,7 +407,8 @@ namespace bpkg
info << "run 'bpkg help pkg-fetch' for more information";
p = pkg_fetch (o,
- c,
+ db /* pdb */,
+ db /* rdb */,
t,
move (n),
move (v),
diff --git a/bpkg/pkg-fetch.hxx b/bpkg/pkg-fetch.hxx
index e9d753b..8607178 100644
--- a/bpkg/pkg-fetch.hxx
+++ b/bpkg/pkg-fetch.hxx
@@ -23,7 +23,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_fetch (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
path archive,
bool replace,
@@ -34,9 +34,15 @@ namespace bpkg
// transaction. Return the selected package object which may replace the
// existing one.
//
+ // Note that both package and repository information configurations need to
+ // be passed.
+ //
+ // Also note that it should be called in session.
+ //
shared_ptr<selected_package>
pkg_fetch (const common_options&,
- const dir_path& configuration,
+ database& pdb,
+ database& rdb,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-install.hxx b/bpkg/pkg-install.hxx
index 120576a..3f257f0 100644
--- a/bpkg/pkg-install.hxx
+++ b/bpkg/pkg-install.hxx
@@ -5,7 +5,6 @@
#define BPKG_PKG_INSTALL_HXX
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // selected_package
#include <bpkg/utility.hxx>
#include <bpkg/pkg-command.hxx>
@@ -13,6 +12,10 @@
namespace bpkg
{
+ // Note that we disallow installing packages from the host/build2
+ // configurations. The reason for that is that otherwise we can fail, trying
+ // to build a package both for install and normally (as a dependency).
+ //
inline int
pkg_install (const pkg_install_options& o, cli::group_scanner& args)
{
@@ -24,6 +27,7 @@ namespace bpkg
o.all (),
o.all_pattern (),
false /* package_cwd */,
+ false /* allow_host_type */,
args);
}
}
diff --git a/bpkg/pkg-purge.cxx b/bpkg/pkg-purge.cxx
index f6589bb..e031b6a 100644
--- a/bpkg/pkg-purge.cxx
+++ b/bpkg/pkg-purge.cxx
@@ -15,7 +15,7 @@ using namespace butl;
namespace bpkg
{
void
- pkg_purge_fs (const dir_path& c,
+ pkg_purge_fs (database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool simulate,
@@ -26,9 +26,10 @@ namespace bpkg
assert (p->state == package_state::fetched ||
p->state == package_state::unpacked);
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
+
try
{
if (p->purge_src)
@@ -49,6 +50,7 @@ namespace bpkg
//
p->src_root = nullopt;
p->manifest_checksum = nullopt;
+ p->buildfiles_checksum = nullopt;
if (archive)
{
@@ -56,7 +58,7 @@ namespace bpkg
{
if (!simulate)
{
- path a (p->archive->absolute () ? *p->archive : c / *p->archive);
+ path a (p->effective_archive (c));
if (exists (a))
rm (a);
@@ -76,14 +78,14 @@ namespace bpkg
db.update (p);
t.commit ();
- info << "package " << p->name << " is now broken; "
+ info << "package " << p->name << db << " is now broken; "
<< "use 'pkg-purge --force' to remove";
throw;
}
}
void
- pkg_purge (const dir_path& c,
+ pkg_purge (database& db,
transaction& t,
const shared_ptr<selected_package>& p,
bool simulate)
@@ -93,11 +95,10 @@ namespace bpkg
tracer trace ("pkg_purge");
- database& db (t.database ());
tracer_guard tg (db, trace);
assert (!p->out_root);
- pkg_purge_fs (c, t, p, simulate, true);
+ pkg_purge_fs (db, t, p, simulate, true);
db.erase (p);
t.commit ();
@@ -120,7 +121,7 @@ namespace bpkg
package_name n (parse_package_name (args.next (),
false /* allow_version */));
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p (db.find<selected_package> (n));
@@ -191,7 +192,7 @@ namespace bpkg
if (p->purge_archive)
{
- path a (p->archive->absolute () ? *p->archive : c / *p->archive);
+ path a (p->effective_archive (c));
if (exists (a))
fail << "archive file of broken package " << n << " still exists" <<
@@ -201,7 +202,7 @@ namespace bpkg
else
{
assert (!p->out_root);
- pkg_purge_fs (c, t, p, false /* simulate */, !o.keep ());
+ pkg_purge_fs (db, t, p, false /* simulate */, !o.keep ());
}
// Finally, update the database state.
diff --git a/bpkg/pkg-purge.hxx b/bpkg/pkg-purge.hxx
index 215e468..ac82bf4 100644
--- a/bpkg/pkg-purge.hxx
+++ b/bpkg/pkg-purge.hxx
@@ -19,7 +19,7 @@ namespace bpkg
// transaction. If this fails, set the package state to broken.
//
void
- pkg_purge (const dir_path& configuration,
+ pkg_purge (database&,
transaction&,
const shared_ptr<selected_package>&,
bool simulate);
@@ -29,7 +29,7 @@ namespace bpkg
// set the package state to broken, commit the transaction, and fail.
//
void
- pkg_purge_fs (const dir_path& configuration,
+ pkg_purge_fs (database&,
transaction&,
const shared_ptr<selected_package>&,
bool simulate,
diff --git a/bpkg/pkg-status.cli b/bpkg/pkg-status.cli
index 24e1dc8..084b7a3 100644
--- a/bpkg/pkg-status.cli
+++ b/bpkg/pkg-status.cli
@@ -25,17 +25,20 @@ namespace bpkg
or, if <ver> is specified, package versions. If no packages were
specified, then \cb{pkg-status} prints the status of all the held
packages (which are the packages that were explicitly built; see
- \l{bpkg-pkg-build(1)}). Additionally, the status of immediate or all
- dependencies of the above packages can be printed by specifying the
- \c{\b{--immediate}|\b{-i}} or \c{\b{--recursive}|\b{-r}} options,
- respectively. Note that the status is written to \cb{stdout}, not
- \cb{stderr}.
-
- The status output format is regular with components separated with
- spaces. Each line starts with the package name (and version, if
- specified) followed by one of the status words listed below. Some of
+ \l{bpkg-pkg-build(1)}). The latter mode can be modified to print the
+ status of all the packages by specifying the \c{\b{--all}|\b{-a}} option.
+ Additionally, the status of immediate or all dependencies of the above
+ packages can be printed by specifying the \c{\b{--immediate}|\b{-i}} or
+ \c{\b{--recursive}|\b{-r}} options, respectively. Note that the status is
+ written to \cb{stdout}, not \cb{stderr}.
+
+ The default output format (see the \cb{--stdout-format} common option) is
+ regular with components separated with spaces. Each line starts with the
+ package name followed by one of the status words listed below. Some of
them can be optionally followed by '\cb{,}' (no spaces) and a sub-status
- word.
+ word. Lines corresponding to dependencies from linked configurations will
+ additionally mention the configuration directory in square brackets after
+ the package name.
\dl|
@@ -81,13 +84,17 @@ namespace bpkg
package may or may not be available from the system and that its version
is unknown.
- Similarly, if only the package name was specified, then the \cb{fetched},
- \cb{unpacked}, \cb{configured}, and \cb{broken} status words are followed
- by the version of the package. If newer versions are available, then the
- package version is followed by the \cb{available} status word and the
- list of newer versions. To instead see a list of all versions, including
- the older ones, specify the \c{\b{--old-available}|\b{-o}} option. In
- this case the currently selected version is printed in '\cb{()}'.
+ The \cb{fetched}, \cb{unpacked}, \cb{configured}, and \cb{broken} status
+ words are followed by the version of the package. If the package version
+ was specified, then the \cb{unknown} status word is also followed by the
+ version.
+
+ If the status is \cb{fetched}, \cb{unpacked}, \cb{configured}, or
+ \cb{broken} and newer versions are available, then the package version is
+ followed by the \cb{available} status word and the list of newer
+ versions. To instead see a list of all versions, including the older
+ ones, specify the \c{\b{--old-available}|\b{-o}} option. In this case the
+ currently selected version is printed in '\cb{()}'.
If the package name was specified with the version, then only the status
(such as, \cb{configured}, \cb{available}, etc.) of this version is
@@ -109,22 +116,22 @@ namespace bpkg
libbar unknown
bpkg status libbar/1.0.0
- libbar/1.0.0 unknown
+ libbar unknown 1.0.0
bpkg status libfoo/1.0.0
- !libfoo/1.0.0 configured !1.0.0
+ !libfoo configured !1.0.0
bpkg status libfoo/1.1.0
- libfoo/1.1.0 available 1.1.0
+ libfoo available 1.1.0
bpkg status --system libfoo/1.1.0
- libfoo/1.1.0 available 1.1.0 sys:1.1.0
+ libfoo available 1.1.0 sys:1.1.0
bpkg status libfoo
!libfoo configured !1.0.0 available 1.1.0 1.1.1
bpkg status libfoo/1.1.1 libbar
- libfoo/1.1.1 available 1.1.1
+ libfoo available 1.1.1
libbar unknown
\
@@ -132,7 +139,7 @@ namespace bpkg
\
bpkg status libfoo/1.0.0
- libfoo/1.0.0 unknown
+ libfoo unknown 1.0.0
bpkg status libfoo
libfoo available 1.1.0 1.1.1
@@ -143,9 +150,93 @@ namespace bpkg
\
bpkg status libfoo
- !libfoo configured,system * available 1.1.0 1.1.1
+ !libfoo configured,system !* available 1.1.0 1.1.1
+ \
+
+ Another example of the status output this time including dependencies:
+
+ \
+ bpkg status -r libbaz
+ !libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ bison [.bpkg/host/] configured 1.0.0
+ libbar configured 2.0.0
\
+ If the output format is \cb{json}, then the output is a JSON array of
+ objects which are the serialized representation of the following C++
+ \cb{struct} \cb{package_status}:
+
+ \
+ struct available_version
+ {
+ string version;
+ bool system;
+ bool dependency;
+ };
+
+ struct package_status
+ {
+ string name;
+ optional<string> configuration;
+ optional<string> constraint;
+ string status;
+ optional<string> sub_status;
+ optional<string> version;
+ bool hold_package;
+ bool hold_version;
+ vector<available_version> available_versions;
+ vector<package_status> dependencies;
+ };
+ \
+
+ For example:
+
+ \
+ [
+ {
+ \"name\": \"hello\",
+ \"status\": \"configured\",
+ \"version\": \"1.0.0\",
+ \"hold_package\": true,
+ \"available_versions\": [
+ {
+ \"version\": \"1.0.1\"
+ },
+ {
+ \"version\": \"2.0.0\"
+ }
+ ],
+ \"dependencies\": [
+ {
+ \"name\": \"libhello\",
+ \"status\": \"configured\",
+ \"version\": \"1.0.2\",
+ }
+ ]
+ }
+ ]
+ \
+
+ See the JSON OUTPUT section in \l{bpkg-common-options(1)} for details on
+ the overall properties of this format and the semantics of the
+ \cb{struct} serialization.
+
+ In \cb{package_status}, the \cb{configuration} member contains the
+ absolute directory of a linked configuration if this package resides in a
+ linked configuration. The \cb{constraint} member is present only if the
+ \cb{--constraint} option is specified. The \cb{version} member is absent
+ if the \cb{status} member is \cb{unknown} or \cb{available} and no
+ package version is specified on the command line. If the \cb{sub_status}
+ member is \cb{system}, then the \cb{version} member can be special
+ \cb{*}. The \cb{dependencies} member is present only if the
+ \cb{--immediate|-i} or \cb{--recursive|-r} options are specified.
+
+ In \cb{available_version}, if the \cb{system} member is \cb{true}, then
+ this version is available from the system, in which case the \cb{version}
+ member can be special \cb{?} or \cb{*}. If the \cb{dependency} member is
+ \cb{true}, then this version is only available as a dependency from
+ prerequisite repositories of other repositories.
"
}
@@ -153,6 +244,16 @@ namespace bpkg
{
"\h|PKG-STATUS OPTIONS|"
+ bool --all|-a
+ {
+ "Print the status of all the packages, not just held."
+ }
+
+ bool --link
+ {
+ "Also print the status of held/all packages from linked configurations."
+ }
+
bool --immediate|-i
{
"Also print the status of immediate dependencies."
diff --git a/bpkg/pkg-status.cxx b/bpkg/pkg-status.cxx
index 655ee8b..56503da 100644
--- a/bpkg/pkg-status.cxx
+++ b/bpkg/pkg-status.cxx
@@ -5,10 +5,13 @@
#include <iostream> // cout
+#include <libbutl/json/serializer.hxx>
+
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/package-query.hxx>
#include <bpkg/manifest-utility.hxx>
using namespace std;
@@ -18,6 +21,8 @@ namespace bpkg
{
struct package
{
+ database& pdb; // Package database.
+ database& rdb; // Repository info database.
package_name name;
bpkg::version version; // Empty if unspecified.
shared_ptr<selected_package> selected; // NULL if none selected.
@@ -25,100 +30,147 @@ namespace bpkg
};
using packages = vector<package>;
- // If recursive or immediate is true, then print status for dependencies
- // indented by two spaces.
- //
- static void
- pkg_status (const pkg_status_options& o,
- database& db,
- const packages& pkgs,
- string& indent,
- bool recursive,
- bool immediate)
+ struct available_package_status
{
- tracer trace ("pkg_status");
+ shared_ptr<available_package> package;
+
+ // Can only be built as a dependency.
+ //
+ // True if this package version doesn't belong to the repositories that
+ // were explicitly added to the configuration and their complements,
+ // recursively.
+ //
+ bool dependency;
+ };
- for (const package& p: pkgs)
- {
- l4 ([&]{trace << "package " << p.name << "; version " << p.version;});
+ class available_package_statuses: public vector<available_package_status>
+ {
+ public:
+ // Empty if the package is not available from the system. Can be `?`.
+ //
+ string system_package_version;
+
+ // Can only be built as a dependency.
+ //
+ // True if there are no package versions available from the repositories
+ // that were explicitly added to the configuration and their complements,
+ // recursively.
+ //
+ bool dependency = true;
+ };
- // Can't be both.
- //
- assert (p.version.empty () || !p.constraint);
+ static available_package_statuses
+ pkg_statuses (const pkg_status_options& o, const package& p)
+ {
+ database& rdb (p.rdb);
+ const shared_ptr<selected_package>& s (p.selected);
- const shared_ptr<selected_package>& s (p.selected);
+ available_package_statuses r;
+
+ bool known (false);
+
+ shared_ptr<repository_fragment> root (
+ rdb.load<repository_fragment> (""));
+
+ using query = query<available_package>;
+
+ query q (query::id.name == p.name);
+ {
+ auto qr (rdb.query<available_package> (q));
+ known = !qr.empty ();
+ r.dependency = (filter_one (root, move (qr)).first == nullptr);
+ }
- // Look for available packages.
+ if (known)
+ {
+ // If the user specified the version, then only look for that
+ // specific version (we still do it since there might be other
+ // revisions).
//
- // Some of them are only available to upgrade/downgrade as dependencies.
+ if (!p.version.empty ())
+ q = q && compare_version_eq (query::id.version,
+ canonical_version (p.version),
+ p.version.revision.has_value (),
+ false /* iteration */);
+
+ // And if we found an existing package, then only look for versions
+      // greater than what already exists unless we were asked to show
+ // old versions.
//
- struct apkg
- {
- shared_ptr<available_package> package;
- bool build;
- };
- vector<apkg> apkgs;
-
- // A package with this name is known in available packages potentially
- // for build.
+ // Note that for a system wildcard version we will always show all
+ // available versions (since it is 0).
//
- bool known (false);
- bool build (false);
+ if (s != nullptr && !o.old_available ())
+ q = q && query::id.version > canonical_version (s->version);
+
+ q += order_by_version_desc (query::id.version);
+
+ for (shared_ptr<available_package> ap:
+ pointer_result (rdb.query<available_package> (q)))
{
- shared_ptr<repository_fragment> root (
- db.load<repository_fragment> (""));
+ bool dependency (filter (root, ap) == nullptr);
+ r.push_back (available_package_status {move (ap), dependency});
+ }
- using query = query<available_package>;
+ // The idea is that in the future we will try to auto-discover a system
+ // version. For now we just say "maybe available from the system" even
+ // if the version was specified by the user. We will later compare it if
+ // the user did specify the version.
+ //
+ if (o.system ())
+ r.system_package_version = '?';
- query q (query::id.name == p.name);
+ // Get rid of stubs.
+ //
+ for (auto i (r.begin ()); i != r.end (); ++i)
+ {
+ if (i->package->stub ())
{
- auto r (db.query<available_package> (q));
- known = !r.empty ();
- build = filter_one (root, move (r)).first != nullptr;
+ // All the rest are stubs so bail out.
+ //
+ r.erase (i, r.end ());
+ break;
}
+ }
+ }
- if (known)
- {
- // If the user specified the version, then only look for that
- // specific version (we still do it since there might be other
- // revisions).
- //
- if (!p.version.empty ())
- q = q && compare_version_eq (query::id.version,
- canonical_version (p.version),
- p.version.revision.has_value (),
- false /* iteration */);
+ return r;
+ }
- // And if we found an existing package, then only look for versions
- // greater than to what already exists unless we were asked to show
- // old versions.
- //
- // Note that for a system wildcard version we will always show all
- // available versions (since it is 0).
- //
- if (s != nullptr && !o.old_available ())
- q = q && query::id.version > canonical_version (s->version);
+ static packages
+ pkg_prerequisites (const shared_ptr<selected_package>& s, database& rdb)
+ {
+ packages r;
+ for (const auto& pair: s->prerequisites)
+ {
+ shared_ptr<selected_package> d (pair.first.load ());
+ database& db (pair.first.database ());
+ const optional<version_constraint>& c (pair.second.constraint);
+ r.push_back (package {db, rdb, d->name, version (), move (d), c});
+ }
+ return r;
+ }
+
+ static void
+ pkg_status_lines (const pkg_status_options& o,
+ const packages& pkgs,
+ string& indent,
+ bool recursive,
+ bool immediate)
+ {
+ tracer trace ("pkg_status_lines");
- q += order_by_version_desc (query::id.version);
+ for (const package& p: pkgs)
+ {
+ l4 ([&]{trace << "package " << p.name << "; version " << p.version;});
- // Packages that are in repositories that were explicitly added to
- // the configuration and their complements, recursively, are also
- // available to build.
- //
- for (shared_ptr<available_package> ap:
- pointer_result (
- db.query<available_package> (q)))
- {
- bool build (filter (root, ap));
- apkgs.push_back (apkg {move (ap), build});
- }
- }
- }
+ available_package_statuses ps (pkg_statuses (o, p));
cout << indent;
// Selected.
//
+ const shared_ptr<selected_package>& s (p.selected);
// Hold package status.
//
@@ -130,7 +182,7 @@ namespace bpkg
// If the package name is selected, then print its exact spelling.
//
- cout << (s != nullptr ? s->name : p.name);
+ cout << (s != nullptr ? s->name : p.name) << p.pdb;
if (o.constraint () && p.constraint)
cout << ' ' << *p.constraint;
@@ -154,105 +206,211 @@ namespace bpkg
// Available.
//
- bool available (false);
- if (known)
+ if (!ps.empty () || !ps.system_package_version.empty ())
{
- // Available from the system.
- //
- // The idea is that in the future we will try to auto-discover a
- // system version and then print that. For now we just say "maybe
- // available from the system" even if the version was specified by
- // the user. We will later compare it if the user did specify the
- // version.
- //
- string sys;
- if (o.system ())
+ cout << (s != nullptr ? " " : "") << "available";
+
+ for (const available_package_status& a: ps)
{
- sys = "?";
- available = true;
+ const version& v (a.package->version);
+
+ // Show the currently selected version in parenthesis.
+ //
+ bool cur (s != nullptr && v == s->version);
+
+ cout << ' '
+ << (cur ? "(" : a.dependency ? "[" : "")
+ << v
+ << (cur ? ")" : a.dependency ? "]" : "");
}
- // Get rid of stubs.
+ if (!ps.system_package_version.empty ())
+ cout << ' '
+ << (ps.dependency ? "[" : "")
+ << "sys:" << ps.system_package_version
+ << (ps.dependency ? "]" : "");
+ }
+ //
+ // Unknown.
+ //
+ else if (s == nullptr)
+ {
+ cout << "unknown";
+
+ // Print the user's version if specified.
//
- for (auto i (apkgs.begin ()); i != apkgs.end (); ++i)
+ if (!p.version.empty ())
+ cout << ' ' << p.version;
+ }
+
+ cout << endl;
+
+ if (recursive || immediate)
+ {
+ // Collect and recurse.
+ //
+ // Let's propagate the repository information source database from the
+ // dependent to its prerequisites.
+ //
+ if (s != nullptr)
{
- if (i->package->stub ())
+ packages dpkgs (pkg_prerequisites (s, p.rdb));
+
+ if (!dpkgs.empty ())
{
- // All the rest are stubs so bail out.
- //
- apkgs.erase (i, apkgs.end ());
- break;
+ indent += " ";
+ pkg_status_lines (o, dpkgs, indent, recursive, false /* immediate */);
+ indent.resize (indent.size () - 2);
}
+ }
+ }
+ }
+ }
+
+ static void
+ pkg_status_json (const pkg_status_options& o,
+ const packages& pkgs,
+ json::stream_serializer& ss,
+ bool recursive,
+ bool immediate)
+ {
+ tracer trace ("pkg_status_json");
+
+ ss.begin_array ();
+
+ for (const package& p: pkgs)
+ {
+ l4 ([&]{trace << "package " << p.name << "; version " << p.version;});
+
+ available_package_statuses ps (pkg_statuses (o, p));
+
+ const shared_ptr<selected_package>& s (p.selected);
+
+ // Note that we won't check some values for being valid UTF-8 (package
+ // names, etc), since their characters belong to even stricter character
+ // sets.
+ //
+ ss.begin_object ();
+
+ // If the package name is selected, then print its exact spelling.
+ //
+ ss.member ("name",
+ (s != nullptr ? s->name : p.name).string (),
+ false /* check */);
+
+ if (!p.pdb.string.empty ())
+ ss.member ("configuration", p.pdb.string);
+
+ if (o.constraint () && p.constraint)
+ ss.member ("constraint", p.constraint->string (), false /* check */);
+
+ // Selected.
+ //
+ if (s != nullptr)
+ {
+ ss.member ("status", to_string (s->state), false /* check */);
+
+ if (s->substate != package_substate::none)
+ ss.member ("sub_status", to_string (s->substate), false /* check */);
- available = true;
+ ss.member ("version", s->version_string (), false /* check */);
+
+ if (s->hold_package)
+ ss.member ("hold_package", true);
+
+ if (s->hold_version)
+ ss.member ("hold_version", true);
+ }
+
+ // Available.
+ //
+ if (!ps.empty () || !ps.system_package_version.empty ())
+ {
+ if (s == nullptr)
+ {
+ ss.member ("status", "available", false /* check */);
+
+ // Print the user's version if specified.
+ //
+ if (!p.version.empty ())
+ ss.member ("version", p.version.string (), false /* check */);
}
- if (available)
+ // Print the list of available versions, unless a specific available
+ // version is already printed.
+ //
+ if (s != nullptr || p.version.empty ())
{
- cout << (s != nullptr ? " " : "") << "available";
+ ss.member_name ("available_versions", false /* check */);
- for (const apkg& a: apkgs)
+ // Serialize an available package version.
+ //
+ auto serialize = [&ss] (const string& v, bool s, bool d)
{
- const version& v (a.package->version);
+ ss.begin_object ();
- // Show the currently selected version in parenthesis.
- //
- bool cur (s != nullptr && v == s->version);
+ ss.member ("version", v, false /* check */);
- cout << ' '
- << (cur ? "(" : a.build ? "" : "[")
- << v
- << (cur ? ")" : a.build ? "" : "]");
- }
+ if (s)
+ ss.member ("system", s);
+
+ if (d)
+ ss.member ("dependency", d);
- if (!sys.empty ())
- cout << ' '
- << (build ? "" : "[")
- << "sys:" << sys
- << (build ? "" : "]");
+ ss.end_object ();
+ };
+
+ ss.begin_array ();
+
+ for (const available_package_status& a: ps)
+ serialize (a.package->version.string (),
+ false /* system */,
+ a.dependency);
+
+ if (!ps.system_package_version.empty ())
+ serialize (ps.system_package_version,
+ true /* system */,
+ ps.dependency);
+
+ ss.end_array ();
}
}
-
- if (s == nullptr && !available)
+ //
+ // Unknown.
+ //
+ else if (s == nullptr)
{
- cout << "unknown";
+ ss.member ("status", "unknown", false /* check */);
// Print the user's version if specified.
//
if (!p.version.empty ())
- cout << ' ' << p.version;
+ ss.member ("version", p.version.string (), false /* check */);
}
- cout << endl;
-
if (recursive || immediate)
{
// Collect and recurse.
//
- packages dpkgs;
+ // Let's propagate the repository information source database from the
+ // dependent to its prerequisites.
+ //
if (s != nullptr)
{
- for (const auto& pair: s->prerequisites)
+ packages dpkgs (pkg_prerequisites (s, p.rdb));
+
+ if (!dpkgs.empty ())
{
- shared_ptr<selected_package> d (pair.first.load ());
- const optional<version_constraint>& c (pair.second);
- dpkgs.push_back (package {d->name, version (), move (d), c});
+ ss.member_name ("dependencies", false /* check */);
+ pkg_status_json (o, dpkgs, ss, recursive, false /* immediate */);
}
}
-
- if (!dpkgs.empty ())
- {
- indent += " ";
- pkg_status (o,
- db,
- dpkgs,
- indent,
- recursive,
- false /* immediate */);
- indent.resize (indent.size () - 2);
- }
}
+
+ ss.end_object ();
}
+
+ ss.end_array ();
}
int
@@ -266,10 +424,17 @@ namespace bpkg
const dir_path& c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
session s;
+ // Let's use as repository information source the package database for the
+ // held packages and the current database for the dependency packages.
+ //
+ // For the dependency packages we should probably use their dependent held
+ // package configurations recursively, but feels a bit hairy at the
+ // moment. So let's keep it simple for now. @@ TODO.
+ //
packages pkgs;
{
using query = query<selected_package>;
@@ -279,51 +444,109 @@ namespace bpkg
while (args.more ())
{
const char* arg (args.next ());
- package p {parse_package_name (arg),
- parse_package_version (arg,
- false /* allow_wildcard */,
- false /* fold_zero_revision */),
- nullptr /* selected */,
- nullopt /* constraint */};
-
- // Search in the packages that already exist in this configuration.
- //
- {
- query q (query::name == p.name);
- if (!p.version.empty ())
- q = q && compare_version_eq (query::version,
- canonical_version (p.version),
- p.version.revision.has_value (),
- false /* iteration */);
+ package_name pn (parse_package_name (arg));
+ version pv (parse_package_version (arg,
+ false /* allow_wildcard */,
+ version::none));
+
+ query q (query::name == pn);
- p.selected = db.query_one<selected_package> (q);
+ if (!pv.empty ())
+ q = q && compare_version_eq (query::version,
+ canonical_version (pv),
+ pv.revision.has_value (),
+ false /* iteration */);
+
+ // Search in the packages that already exist in this and all the
+ // dependency configurations.
+ //
+ bool found (false);
+ for (database& ldb: db.dependency_configs ())
+ {
+ shared_ptr<selected_package> sp (
+ ldb.query_one<selected_package> (q));
+
+ if (sp != nullptr)
+ {
+ pkgs.push_back (package {ldb,
+ sp->hold_package ? ldb : db,
+ pn,
+ pv,
+ move (sp),
+ nullopt /* constraint */});
+ found = true;
+ }
}
- pkgs.push_back (move (p));
+ if (!found)
+ {
+ pkgs.push_back (package {db,
+ db,
+ move (pn),
+ move (pv),
+ nullptr /* selected */,
+ nullopt /* constraint */});
+ }
}
}
else
{
- // Find all held packages.
+ // Find held/all packages in this and, if --link specified, all the
+ // dependency configurations.
//
- for (shared_ptr<selected_package> s:
- pointer_result (
- db.query<selected_package> (query::hold_package)))
+ query q;
+
+ if (!o.all ())
+ q = query::hold_package;
+
+ for (database& ldb: db.dependency_configs ())
{
- pkgs.push_back (package {s->name, version (), move (s), nullopt});
+ for (shared_ptr<selected_package> s:
+ pointer_result (
+ ldb.query<selected_package> (q)))
+ {
+ pkgs.push_back (package {ldb,
+ s->hold_package ? ldb : db,
+ s->name,
+ version (),
+ move (s),
+ nullopt /* constraint */});
+ }
+
+ if (!o.link ())
+ break;
}
if (pkgs.empty ())
{
- info << "no held packages in the configuration";
+ if (o.all ())
+ info << "no packages in the configuration";
+ else
+ info << "no held packages in the configuration" <<
+ info << "use --all|-a to see status of all packages";
+
return 0;
}
}
}
- string indent;
- pkg_status (o, db, pkgs, indent, o.recursive (), o.immediate ());
+ switch (o.stdout_format ())
+ {
+ case stdout_format::lines:
+ {
+ string indent;
+ pkg_status_lines (o, pkgs, indent, o.recursive (), o.immediate ());
+ break;
+ }
+ case stdout_format::json:
+ {
+ json::stream_serializer s (cout);
+ pkg_status_json (o, pkgs, s, o.recursive (), o.immediate ());
+ cout << endl;
+ break;
+ }
+ }
t.commit ();
return 0;
diff --git a/bpkg/pkg-test.hxx b/bpkg/pkg-test.hxx
index 0cadabe..26c7b18 100644
--- a/bpkg/pkg-test.hxx
+++ b/bpkg/pkg-test.hxx
@@ -23,6 +23,7 @@ namespace bpkg
o.all (),
o.all_pattern (),
o.package_cwd (),
+ true /* allow_host_type */,
args);
}
}
diff --git a/bpkg/pkg-uninstall.hxx b/bpkg/pkg-uninstall.hxx
index 94a8390..6024fe1 100644
--- a/bpkg/pkg-uninstall.hxx
+++ b/bpkg/pkg-uninstall.hxx
@@ -5,7 +5,6 @@
#define BPKG_PKG_UNINSTALL_HXX
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // selected_package
#include <bpkg/utility.hxx>
#include <bpkg/pkg-command.hxx>
@@ -13,6 +12,9 @@
namespace bpkg
{
+ // Note that we disallow uninstalling packages from the host/build2
+ // configurations (see pkg_install() for details).
+ //
inline int
pkg_uninstall (const pkg_uninstall_options& o, cli::group_scanner& args)
{
@@ -23,6 +25,7 @@ namespace bpkg
o.all (),
o.all_pattern (),
false /* package_cwd */,
+ false /* allow_host_type */,
args);
}
}
diff --git a/bpkg/pkg-unpack.cxx b/bpkg/pkg-unpack.cxx
index 9685f3e..22ff02f 100644
--- a/bpkg/pkg-unpack.cxx
+++ b/bpkg/pkg-unpack.cxx
@@ -10,6 +10,7 @@
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
#include <bpkg/checksum.hxx>
+#include <bpkg/rep-mask.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/manifest-utility.hxx>
@@ -24,14 +25,13 @@ namespace bpkg
// diagnose all the illegal cases.
//
static void
- pkg_unpack_check (const dir_path& c,
- transaction& t,
+ pkg_unpack_check (database& db,
+ transaction&,
const package_name& n,
bool replace)
{
tracer trace ("pkg_update_check");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (shared_ptr<selected_package> p = db.find<selected_package> (n))
@@ -42,6 +42,7 @@ namespace bpkg
if (!replace || !s)
{
diag_record dr (fail);
+ const dir_path& c (db.config_orig);
dr << "package " << n << " already exists in configuration " << c <<
info << "version: " << p->version_string ()
@@ -58,37 +59,26 @@ namespace bpkg
// package object which may replace the existing one.
//
static shared_ptr<selected_package>
- pkg_unpack (const common_options& o,
- dir_path c,
+ pkg_unpack (database& db,
transaction& t,
- package_name n,
- version v,
- dir_path d,
- repository_location rl,
+ package_name&& n,
+ version&& v,
+ dir_path&& d,
+ repository_location&& rl,
+ shared_ptr<selected_package>&& p,
+ optional<string>&& mc,
+ optional<string>&& bc,
bool purge,
bool simulate)
{
- tracer trace ("pkg_unpack");
-
- database& db (t.database ());
- tracer_guard tg (db, trace);
-
- optional<string> mc;
-
- if (!simulate)
- mc = sha256 (o, d / manifest_file);
-
- // Make the package and configuration paths absolute and normalized.
- // If the package is inside the configuration, use the relative path.
- // This way we can move the configuration around.
+ // Make the package path absolute and normalized. If the package is inside
+ // the configuration, use the relative path. This way we can move the
+ // configuration around.
//
- normalize (c, "configuration");
normalize (d, "package");
- if (d.sub (c))
- d = d.leaf (c);
-
- shared_ptr<selected_package> p (db.find<selected_package> (n));
+ if (d.sub (db.config))
+ d = d.leaf (db.config);
if (p != nullptr)
{
@@ -96,7 +86,7 @@ namespace bpkg
// replacing. Once this is done, there is no going back. If things
// go badly, we can't simply abort the transaction.
//
- pkg_purge_fs (c, t, p, simulate);
+ pkg_purge_fs (db, t, p, simulate);
// Note that if the package name spelling changed then we need to update
// it, to make sure that the subsequent commands don't fail and the
@@ -118,6 +108,7 @@ namespace bpkg
p->src_root = move (d);
p->purge_src = purge;
p->manifest_checksum = move (mc);
+ p->buildfiles_checksum = move (bc);
db.update (p);
}
@@ -136,6 +127,7 @@ namespace bpkg
move (d),
purge,
move (mc),
+ move (bc),
nullopt, // No output directory yet.
{}}); // No prerequisites captured yet.
@@ -145,12 +137,68 @@ namespace bpkg
assert (p->external ());
t.commit ();
- return p;
+ return move (p);
+ }
+
+ template <typename T>
+ static shared_ptr<selected_package>
+ pkg_unpack (const common_options& o,
+ database& db,
+ transaction& t,
+ package_name n,
+ version v,
+ const vector<T>& deps,
+ const package_info* pi,
+ dir_path d,
+ repository_location rl,
+ bool purge,
+ bool simulate)
+ {
+ tracer trace ("pkg_unpack");
+
+ tracer_guard tg (db, trace);
+
+ shared_ptr<selected_package> p (db.find<selected_package> (n));
+
+ optional<string> mc;
+ optional<string> bc;
+
+ // Only calculate the manifest/subprojects and buildfiles checksums for
+ // external packages (see selected_package::external() for details).
+ //
+ if (!simulate && (rl.empty () || rl.directory_based ()))
+ {
+ mc = package_checksum (o, d, pi);
+
+ // Calculate the buildfiles checksum if the package has any buildfile
+ // clauses in the dependencies. Always calculate it over the buildfiles
+ // since the package is external.
+ //
+ if ((p != nullptr && p->manifest_checksum == mc)
+ ? p->buildfiles_checksum.has_value ()
+ : has_buildfile_clause (deps))
+ bc = package_buildfiles_checksum (nullopt /* bootstrap_build */,
+ nullopt /* root_build */,
+ {} /* buildfiles */,
+ d);
+ }
+
+ return pkg_unpack (db,
+ t,
+ move (n),
+ move (v),
+ move (d),
+ move (rl),
+ move (p),
+ move (mc),
+ move (bc),
+ purge,
+ simulate);
}
shared_ptr<selected_package>
pkg_unpack (const common_options& o,
- const dir_path& c,
+ database& db,
transaction& t,
const dir_path& d,
bool replace,
@@ -162,37 +210,59 @@ namespace bpkg
if (!exists (d))
fail << "package directory " << d << " does not exist";
+ // For better diagnostics, let's obtain the package info after
+ // pkg_verify() verifies that this is a package directory.
+ //
+ package_version_info pvi;
+
// Verify the directory is a package and get its manifest.
//
package_manifest m (
- pkg_verify (d,
+ pkg_verify (o,
+ d,
true /* ignore_unknown */,
- [&o, &d] (version& v)
+ false /* ignore_toolchain */,
+ false /* load_buildfiles */,
+ [&o, &d, &pvi] (version& v)
{
- if (optional<version> pv = package_version (o, d))
- v = move (*pv);
+ // Note that we also query subprojects since the package
+ // information will be used for the subsequent
+ // package_iteration() call.
+ //
+ pvi = package_version (o, d, b_info_flags::subprojects);
+
+ if (pvi.version)
+ v = move (*pvi.version);
}));
l4 ([&]{trace << d << ": " << m.name << " " << m.version;});
// Check/diagnose an already existing package.
//
- pkg_unpack_check (c, t, m.name, replace);
+ pkg_unpack_check (db, t, m.name, replace);
// Fix-up the package version.
//
- if (optional<version> v = package_iteration (
- o, c, t, d, m.name, m.version, true /* check_external */))
+ if (optional<version> v = package_iteration (o,
+ db,
+ t,
+ d,
+ m.name,
+ m.version,
+ &pvi.info,
+ true /* check_external */))
m.version = move (*v);
// Use the special root repository fragment as the repository fragment of
// this package.
//
return pkg_unpack (o,
- c,
+ db,
t,
move (m.name),
move (m.version),
+ m.dependencies,
+ &pvi.info,
d,
repository_location (),
purge,
@@ -201,7 +271,8 @@ namespace bpkg
shared_ptr<selected_package>
pkg_unpack (const common_options& o,
- const dir_path& c,
+ database& pdb,
+ database& rdb,
transaction& t,
package_name n,
version v,
@@ -210,20 +281,19 @@ namespace bpkg
{
tracer trace ("pkg_unpack");
- database& db (t.database ());
- tracer_guard tg (db, trace);
+ tracer_guard tg (pdb, trace); // NOTE: sets tracer for the whole cluster.
// Check/diagnose an already existing package.
//
- pkg_unpack_check (c, t, n, replace);
+ pkg_unpack_check (pdb, t, n, replace);
- check_any_available (c, t);
+ check_any_available (rdb, t);
- // Note that here we compare including the revision (see pkg-fetch()
+ // Note that here we compare including the revision (see pkg_fetch()
// implementation for more details).
//
shared_ptr<available_package> ap (
- db.find<available_package> (available_package_id (n, v)));
+ rdb.find<available_package> (available_package_id (n, v)));
if (ap == nullptr)
fail << "package " << n << " " << v << " is not available";
@@ -235,7 +305,8 @@ namespace bpkg
for (const package_location& l: ap->locations)
{
- if (l.repository_fragment.load ()->location.directory_based ())
+ if (!rep_masked_fragment (l.repository_fragment) &&
+ l.repository_fragment.load ()->location.directory_based ())
{
pl = &l;
break;
@@ -253,35 +324,37 @@ namespace bpkg
const repository_location& rl (pl->repository_fragment->location);
return pkg_unpack (o,
- c,
+ pdb,
t,
move (n),
move (v),
+ ap->dependencies,
+ nullptr /* package_info */,
path_cast<dir_path> (rl.path () / pl->location),
rl,
- false /* purge */,
+ false /* purge */,
simulate);
}
shared_ptr<selected_package>
pkg_unpack (const common_options& co,
- const dir_path& c,
+ database& db,
transaction& t,
const package_name& name,
bool simulate)
{
tracer trace ("pkg_unpack");
- database& db (t.database ());
tracer_guard tg (db, trace);
+ const dir_path& c (db.config_orig);
shared_ptr<selected_package> p (db.find<selected_package> (name));
if (p == nullptr)
fail << "package " << name << " does not exist in configuration " << c;
if (p->state != package_state::fetched)
- fail << "package " << name << " is " << p->state <<
+ fail << "package " << name << db << " is " << p->state <<
info << "expected it to be fetched";
l4 ([&]{trace << *p;});
@@ -293,20 +366,22 @@ namespace bpkg
// Also, since we must have verified the archive during fetch,
// here we can just assume what the resulting directory will be.
//
- dir_path d (c / dir_path (p->name.string () + '-' + p->version.string ()));
+ const package_name& n (p->name);
+ const version& v (p->version);
- if (exists (d))
- fail << "package directory " << d << " already exists";
+ dir_path d (c / dir_path (n.string () + '-' + v.string ()));
auto_rmdir arm;
- optional<string> mc;
if (!simulate)
{
+ if (exists (d))
+ fail << "package directory " << d << " already exists";
+
// If the archive path is not absolute, then it must be relative
// to the configuration.
//
- path a (p->archive->absolute () ? *p->archive : c / *p->archive);
+ path a (p->effective_archive (c));
l4 ([&]{trace << "archive: " << a;});
@@ -329,15 +404,11 @@ namespace bpkg
{
fail << "unable to extract " << a << " to " << c << ": " << e;
}
-
- mc = sha256 (co, d / manifest_file);
}
p->src_root = d.leaf (); // For now assuming to be in configuration.
p->purge_src = true;
- p->manifest_checksum = move (mc);
-
p->state = package_state::unpacked;
db.update (p);
@@ -356,7 +427,7 @@ namespace bpkg
const dir_path& c (o.directory ());
l4 ([&]{trace << "configuration: " << c;});
- database db (open (c, trace));
+ database db (c, trace, true /* pre_attach */);
transaction t (db);
shared_ptr<selected_package> p;
@@ -371,7 +442,7 @@ namespace bpkg
info << "run 'bpkg help pkg-unpack' for more information";
p = pkg_unpack (o,
- c,
+ db,
t,
dir_path (args.next ()),
o.replace (),
@@ -400,9 +471,14 @@ namespace bpkg
// "unpack" it from the directory-based repository.
//
p = v.empty ()
- ? pkg_unpack (o, c, t, n, false /* simulate */)
+ ? pkg_unpack (o,
+ db /* pdb */,
+ t,
+ n,
+ false /* simulate */)
: pkg_unpack (o,
- c,
+ db /* pdb */,
+ db /* rdb */,
t,
move (n),
move (v),
diff --git a/bpkg/pkg-unpack.hxx b/bpkg/pkg-unpack.hxx
index 107322b..7394732 100644
--- a/bpkg/pkg-unpack.hxx
+++ b/bpkg/pkg-unpack.hxx
@@ -23,7 +23,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const dir_path&,
bool replace,
@@ -34,7 +34,7 @@ namespace bpkg
//
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database&,
transaction&,
const package_name&,
bool simulate);
@@ -43,9 +43,13 @@ namespace bpkg
// repository and commit the transaction. Return the selected package object
// which may replace the existing one.
//
+ // Note that both package and repository information configurations need to
+ // be passed.
+ //
shared_ptr<selected_package>
pkg_unpack (const common_options&,
- const dir_path& configuration,
+ database& pdb,
+ database& rdb,
transaction&,
package_name,
version,
diff --git a/bpkg/pkg-update.hxx b/bpkg/pkg-update.hxx
index d7b9536..cac7651 100644
--- a/bpkg/pkg-update.hxx
+++ b/bpkg/pkg-update.hxx
@@ -5,7 +5,6 @@
#define BPKG_PKG_UPDATE_HXX
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // selected_package
#include <bpkg/utility.hxx>
#include <bpkg/pkg-command.hxx>
@@ -24,17 +23,17 @@ namespace bpkg
o.all (),
o.all_pattern (),
false /* package_cwd */,
+ true /* allow_host_type */,
args);
}
inline void
- pkg_update (const dir_path& configuration,
- const common_options& o,
+ pkg_update (const common_options& o,
const string& cmd_variant,
const strings& common_vars,
const vector<pkg_command_vars>& pkgs)
{
- pkg_command ("update", configuration, o, cmd_variant, common_vars, pkgs);
+ pkg_command ("update", o, cmd_variant, common_vars, pkgs);
}
}
diff --git a/bpkg/pkg-verify.cli b/bpkg/pkg-verify.cli
index 33593e4..9d45cad 100644
--- a/bpkg/pkg-verify.cli
+++ b/bpkg/pkg-verify.cli
@@ -23,8 +23,9 @@ namespace bpkg
the top-level directory inside the archive match the canonical
\c{\i{name}\b{-}\i{version}} form and that there is a valid manifest file
in that top-level directory. Additionally, if the \cb{--deep} option is
- specified, it also checks that the files referenced by the \cb{*-file}
- manifest values are present in the archive and are not empty."
+ specified, it also checks that the required \c{*-build} values/files are
+ present in the manifest/archive and the files referenced by the
+ \cb{*-file} manifest values are present in the archive and are not empty."
}
class pkg_verify_options: common_options
@@ -39,14 +40,16 @@ namespace bpkg
bool --deep
{
- "Verify files referenced by the \cb{*-file} manifest values."
+ "Verify the presence of the required \c{*-build} values/files and
+ the validity of files referenced by the \cb{*-file} manifest values."
}
bool --ignore-unknown
{
"Ignore unknown manifest entries. By default, \cb{bpkg} will refuse to
- declare such a package valid since it cannot be sure the unknown entries
- are valid."
+ declare such a package valid since it cannot be sure the unknown
+ entries are valid. Note that this option also ignores the version
+ constraints in the special toolchain build-time dependencies."
}
bool --manifest
@@ -55,7 +58,9 @@ namespace bpkg
human-readable form, dump the package manifest to \cb{stdout}. If the
\cb{--deep} option is specified, then in the resulting manifest the
\cb{*-file} values are replaced with the contents of the referenced
- files and the package dependency constraints are completed."
+ files, the \c{*-build} values are automatically added (unless the
+ corresponding files are absent), and the package dependency constraints
+ are completed."
}
};
diff --git a/bpkg/pkg-verify.cxx b/bpkg/pkg-verify.cxx
index 38b4d68..d48c5b7 100644
--- a/bpkg/pkg-verify.cxx
+++ b/bpkg/pkg-verify.cxx
@@ -5,11 +5,12 @@
#include <iostream> // cout
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <bpkg/archive.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/satisfaction.hxx>
#include <bpkg/manifest-utility.hxx>
using namespace std;
@@ -17,28 +18,171 @@ using namespace butl;
namespace bpkg
{
+ pkg_verify_result
+ pkg_verify (const common_options& co,
+ manifest_parser& p,
+ bool it,
+ const path& what,
+ int diag_level)
+ {
+ manifest_name_value nv (p.next ());
+
+ // Make sure this is the start and we support the version.
+ //
+ if (!nv.name.empty ())
+ throw manifest_parsing (p.name (), nv.name_line, nv.name_column,
+ "start of package manifest expected");
+
+ if (nv.value != "1")
+ throw manifest_parsing (p.name (), nv.value_line, nv.value_column,
+ "unsupported format version");
+
+ pkg_verify_result r;
+
+ // For the depends name, parse the value and if it contains the build2 or
+ // bpkg constraints, verify that they are satisfied, if requested.
+ //
+ // Note that if the semantics of the depends value changes we may be
+ // unable to parse some of them before we get to build2 or bpkg and issue
+ // the user-friendly diagnostics. So we are going to ignore such depends
+ // values. But that means that if the user made a mistake in build2/bpkg
+ // then we will skip them as well. This, however, is not a problem since
+ // the pre-parsed result will then be re-parsed (e.g., by the
+ // package_manifest() constructor) which will diagnose any mistakes.
+ //
+ for (nv = p.next (); !nv.empty (); nv = p.next ())
+ {
+ if (nv.name == "depends")
+ try
+ {
+ // Note that we don't have the dependent package name here (unless we
+ // bother to retrieve it from the manifest in advance). This may cause
+ // parsing of a dependency alternative to fail while verifying the
+ // reflect clause (see dependency_alternative for details). That is,
+ // however, OK since we don't expect any clauses for the build2 and
+ // bpkg constraints and we just ignore failures for other depends
+ // values (see above).
+ //
+ dependency_alternatives das (nv.value, package_name ());
+
+ if (das.buildtime)
+ {
+ for (dependency_alternative& da: das)
+ {
+ for (dependency& d: da)
+ {
+ const package_name& dn (d.name);
+
+ if (dn != "build2" && dn != "bpkg")
+ continue;
+
+ // Even if the toolchain build-time dependencies are requested
+ // to be ignored let's make sure they are well-formed, i.e. they
+ // are the only dependencies in the respective depends values.
+ //
+ if (da.size () != 1)
+ {
+ if (diag_level != 0)
+ error (p.name (), nv.value_line, nv.value_column)
+ << "multiple names in " << dn << " dependency";
+
+ throw failed ();
+ }
+
+ if (das.size () != 1)
+ {
+ if (diag_level != 0)
+ error (p.name (), nv.value_line, nv.value_column)
+ << "alternatives in " << dn << " dependency";
+
+ throw failed ();
+ }
+
+ if (dn == "build2")
+ {
+ if (!it && d.constraint && !satisfy_build2 (co, d))
+ {
+ if (diag_level != 0)
+ {
+ diag_record dr (error);
+ dr << "unable to satisfy constraint (" << d << ")";
+
+ if (!what.empty ())
+ dr << " for package " << what;
+
+ dr << info << "available build2 version is "
+ << build2_version;
+ }
+
+ throw failed ();
+ }
+
+ r.build2_dependency = move (d);
+ }
+ else
+ {
+ if (!it && d.constraint && !satisfy_bpkg (co, d))
+ {
+ if (diag_level != 0)
+ {
+ diag_record dr (error);
+ dr << "unable to satisfy constraint (" << d << ")";
+
+ if (!what.empty ())
+ dr << " for package " << what;
+
+ dr << info << "available bpkg version is "
+ << bpkg_version;
+ }
+
+ throw failed ();
+ }
+
+ r.bpkg_dependency = move (d);
+ }
+ }
+ }
+ }
+ }
+ catch (const manifest_parsing&) {} // Ignore
+
+ r.push_back (move (nv));
+ }
+
+ // Make sure this is the end.
+ //
+ nv = p.next ();
+ if (!nv.empty ())
+ throw manifest_parsing (p.name (), nv.name_line, nv.name_column,
+ "single package manifest expected");
+
+ return r;
+ }
+
package_manifest
pkg_verify (const common_options& co,
const path& af,
bool iu,
+ bool it,
bool ev,
+ bool lb,
bool cd,
- bool diag)
+ int diag_level)
try
{
dir_path pd (package_dir (af));
path mf (pd / manifest_file);
- // If diag is false, we need to make tar not print any diagnostics. There
- // doesn't seem to be an option to suppress this and the only way is to
- // redirect stderr to something like /dev/null.
+ // If the diag level is less than 2, we need to make tar not print any
+ // diagnostics. There doesn't seem to be an option to suppress this and
+ // the only way is to redirect stderr to something like /dev/null.
//
// If things go badly for tar and it starts spitting errors instead of the
// manifest, the manifest parser will fail. But that's ok since we assume
// that the child error is always the reason for the manifest parsing
// failure.
//
- pair<process, process> pr (start_extract (co, af, mf, diag));
+ pair<process, process> pr (start_extract (co, af, mf, diag_level == 2));
auto wait = [&pr] () {return pr.second.wait () && pr.first.wait ();};
@@ -46,18 +190,23 @@ namespace bpkg
{
ifdstream is (move (pr.second.in_ofd), fdstream_mode::skip);
manifest_parser mp (is, mf.string ());
- package_manifest m (mp, iu, cd);
+
+ package_manifest m (mp.name (),
+ pkg_verify (co, mp, it, af, diag_level),
+ iu,
+ cd);
+
is.close ();
if (wait ())
{
// Verify package archive/directory is <name>-<version>.
//
- dir_path ed (m.name.string () + "-" + m.version.string ());
+ dir_path ed (m.name.string () + '-' + m.version.string ());
if (pd != ed)
{
- if (diag)
+ if (diag_level != 0)
error << "package archive/directory name mismatch in " << af <<
info << "extracted from archive '" << pd << "'" <<
info << "expected from manifest '" << ed << "'";
@@ -65,30 +214,163 @@ namespace bpkg
throw failed ();
}
- // Expand the *-file manifest values, if requested.
+ // If requested, expand file-referencing package manifest values.
//
- if (ev)
+ if (ev || lb)
{
m.load_files (
- [&pd, &co, &af, diag] (const string& n, const path& p)
+ [ev, &pd, &co, &af, diag_level]
+ (const string& n, const path& p) -> optional<string>
{
- path f (pd / p);
- string s (extract (co, af, f, diag));
+ bool bf (n == "build-file");
- if (s.empty ())
+ // Always expand the build-file values.
+ //
+ if (ev || bf)
{
- if (diag)
- error << n << " manifest value in package archive "
- << af << " references empty file " << f;
+ path f (pd / p);
+ string s (extract (co, af, f, diag_level != 0));
- throw failed ();
- }
+ if (s.empty () && !bf)
+ {
+ if (diag_level != 0)
+ error << n << " manifest value in package archive "
+ << af << " references empty file " << f;
- return s;
+ throw failed ();
+ }
+
+ return s;
+ }
+ else
+ return nullopt;
},
iu);
}
+ // Load the bootstrap, root, and config/*.build buildfiles into the
+ // respective *-build values, if requested and are not already
+ // specified in the manifest.
+ //
+ // Note that we don't verify that the files are not empty.
+ //
+ if (lb)
+ {
+ paths ps (archive_contents (co, af, diag_level != 0));
+
+ auto contains = [&ps] (const path& p)
+ {
+ return find (ps.begin (), ps.end (), p) != ps.end ();
+ };
+
+ auto extract_buildfiles = [&m, &co, &af, &ps, diag_level, &contains]
+ (const path& b,
+ const path& r,
+ const dir_path& c,
+ const string& ext)
+ {
+ if (!m.bootstrap_build)
+ m.bootstrap_build = extract (co, af, b, diag_level != 0);
+
+ if (!m.root_build && contains (r))
+ m.root_build = extract (co, af, r, diag_level != 0);
+
+ // Extract build/config/*.build files.
+ //
+ if (m.root_build)
+ {
+ vector<buildfile>& bs (m.buildfiles);
+ size_t n (bs.size ());
+
+ for (const path& ap: ps)
+ {
+ if (!ap.to_directory () && ap.sub (c))
+ {
+ path p (ap.leaf (c));
+ const char* e (p.extension_cstring ());
+
+ // Only consider immediate sub-entries of the config/
+ // subdirectory.
+ //
+ if (e != nullptr && ext == e && p.simple ())
+ {
+ path f (c.leaf () / p.base ()); // Relative to build/.
+
+ if (find_if (bs.begin (), bs.end (),
+ [&f] (const auto& v) {return v.path == f;}) ==
+ bs.end ())
+ {
+ bs.emplace_back (move (f),
+ extract (co, af, ap, diag_level != 0));
+ }
+ }
+ }
+ }
+
+ // To produce a stable result sort the appended *-build values.
+ //
+ if (bs.size () != n)
+ {
+ sort (bs.begin () + n, bs.end (),
+ [] (const auto& x, const auto& y)
+ {
+ return x.path < y.path;
+ });
+ }
+ }
+ };
+
+ // Set the manifest's alt_naming flag to the deduced value if absent
+ // and verify that it matches otherwise.
+ //
+ auto alt_naming = [&m, diag_level, &af] (bool v)
+ {
+ if (!m.alt_naming)
+ {
+ m.alt_naming = v;
+ }
+ else if (*m.alt_naming != v)
+ {
+ if (diag_level != 0)
+ error << "buildfile naming scheme mismatch between manifest "
+ << "and package archive " << af;
+
+ throw failed ();
+ }
+ };
+
+ // Check the alternative bootstrap file first since it is more
+ // specific.
+ //
+ path bf;
+ if (contains (bf = pd / alt_bootstrap_file))
+ {
+ alt_naming (true);
+
+ extract_buildfiles (bf,
+ pd / alt_root_file,
+ pd / alt_config_dir,
+ alt_build_ext);
+ }
+ else if (contains (bf = pd / std_bootstrap_file))
+ {
+ alt_naming (false);
+
+ extract_buildfiles (bf,
+ pd / std_root_file,
+ pd / std_config_dir,
+ std_build_ext);
+ }
+ else
+ {
+ if (diag_level != 0)
+ error << "unable to find bootstrap.build file in package "
+ << "archive " << af;
+
+ throw failed ();
+ }
+ }
+
return m;
}
@@ -101,7 +383,7 @@ namespace bpkg
{
if (wait ())
{
- if (diag)
+ if (diag_level != 0)
error (e.name, e.line, e.column) << e.description <<
info << "package archive " << af;
@@ -112,7 +394,7 @@ namespace bpkg
{
if (wait ())
{
- if (diag)
+ if (diag_level != 0)
error << "unable to extract " << mf << " from " << af;
throw failed ();
@@ -128,10 +410,10 @@ namespace bpkg
// diagnostics, tar, specifically, doesn't mention the archive
// name.
//
- if (diag)
+ if (diag_level == 2)
error << af << " does not appear to be a bpkg package";
- throw failed ();
+ throw not_package ();
}
catch (const process_error& e)
{
@@ -142,10 +424,13 @@ namespace bpkg
}
package_manifest
- pkg_verify (const dir_path& d,
+ pkg_verify (const common_options& co,
+ const dir_path& d,
bool iu,
+ bool it,
+ bool lb,
const function<package_manifest::translate_function>& tf,
- bool diag)
+ int diag_level)
{
// Parse the manifest.
//
@@ -153,45 +438,100 @@ namespace bpkg
if (!exists (mf))
{
- if (diag)
+ if (diag_level == 2)
error << "no manifest file in package directory " << d;
- throw failed ();
+ throw not_package ();
}
try
{
ifdstream ifs (mf);
manifest_parser mp (ifs, mf.string ());
- package_manifest m (mp, tf, iu);
+
+ package_manifest m (mp.name (),
+ pkg_verify (co, mp, it, d, diag_level),
+ tf,
+ iu);
+
+ // Load the bootstrap, root, and config/*.build buildfiles into the
+ // respective *-build values, if requested and if they are not already
+ // specified in the manifest. But first expand the build-file manifest
+ // values into the respective *-build values.
+ //
+ // Note that we don't verify that the files are not empty.
+ //
+ if (lb)
+ {
+ m.load_files (
+ [&d, &mf, diag_level]
+ (const string& n, const path& p) -> optional<string>
+ {
+ // Only expand the build-file values.
+ //
+ if (n == "build-file")
+ {
+ path f (d / p);
+
+ try
+ {
+ ifdstream is (f);
+ return is.read_text ();
+ }
+ catch (const io_error& e)
+ {
+ if (diag_level != 0)
+ error << "unable to read from " << f << " referenced by "
+ << n << " manifest value in " << mf << ": " << e;
+
+ throw failed ();
+ }
+ }
+ else
+ return nullopt;
+ },
+ iu);
+
+ try
+ {
+ load_package_buildfiles (m, d);
+ }
+ catch (const runtime_error& e)
+ {
+ if (diag_level != 0)
+ error << e;
+
+ throw failed ();
+ }
+ }
// We used to verify package directory is <name>-<version> but it is
// not clear why we should enforce it in this case (i.e., the user
// provides us with a package directory).
//
- // dir_path ed (m.name + "-" + m.version.string ());
+ // dir_path ed (m.name + '-' + m.version.string ());
//
// if (d.leaf () != ed)
// {
- // if (diag)
- // error << "invalid package directory name '" << d.leaf () << "'" <<
- // info << "expected from manifest '" << ed << "'";
+ // if (diag_level != 0)
+ // error << "invalid package directory name '" << d.leaf () << "'" <<
+ // info << "expected from manifest '" << ed << "'";
//
- // throw failed ();
+ // throw failed ();
// }
return m;
}
catch (const manifest_parsing& e)
{
- if (diag)
+ if (diag_level != 0)
error (e.name, e.line, e.column) << e.description;
throw failed ();
}
catch (const io_error& e)
{
- if (diag)
+ if (diag_level != 0)
error << "unable to read from " << mf << ": " << e;
throw failed ();
@@ -219,12 +559,15 @@ namespace bpkg
//
try
{
- package_manifest m (pkg_verify (o,
- a,
- o.ignore_unknown (),
- o.deep () /* expand_values */,
- o.deep () /* complete_depends */,
- !o.silent ()));
+ package_manifest m (
+ pkg_verify (o,
+ a,
+ o.ignore_unknown (),
+ o.ignore_unknown () /* ignore_toolchain */,
+ o.deep () /* expand_values */,
+ o.deep () /* load_buildfiles */,
+ o.deep () /* complete_values */,
+ o.silent () ? 0 : 2));
if (o.manifest ())
{
@@ -247,9 +590,9 @@ namespace bpkg
return 0;
}
- catch (const failed&)
+ catch (const failed& e)
{
- return 1;
+ return e.code;
}
}
}
diff --git a/bpkg/pkg-verify.hxx b/bpkg/pkg-verify.hxx
index 5643692..0609844 100644
--- a/bpkg/pkg-verify.hxx
+++ b/bpkg/pkg-verify.hxx
@@ -4,6 +4,8 @@
#ifndef BPKG_PKG_VERIFY_HXX
#define BPKG_PKG_VERIFY_HXX
+#include <libbutl/manifest-forward.hxx>
+
#include <libbpkg/manifest.hxx>
#include <bpkg/types.hxx>
@@ -17,20 +19,36 @@ namespace bpkg
pkg_verify (const pkg_verify_options&, cli::scanner& args);
// Verify archive is a valid package and return its manifest. If requested,
- // expand the file-referencing manifest values (description, changes, etc),
- // setting them to the contents of files they refer to, set the potentially
- // absent description-type value to the effective description type (see
- // libbpkg/manifest.hxx), and complete the dependency constraints. Throw
- // failed if invalid or if something goes wrong. If diag is false, then
- // don't issue diagnostics about the reason why the package is invalid.
+ // verify that all manifest entries are recognized and the package is
+ // compatible with the current toolchain. Also, if requested, expand the
+ // file-referencing manifest values (description, changes, etc), setting
+ // them to the contents of files they refer to, set the potentially absent
+ // description-type value to the effective description type (see
+ // libbpkg/manifest.hxx), load the bootstrap, root, and config/*.build
+ // buildfiles into the respective *-build values, and complete the
+ // manifest values (depends, <distribution>-version, etc).
+ //
+ // Throw not_package (derived from failed) if this doesn't look like a
+ // package. Throw plain failed if this does look like a package but
+ // something about it is invalid or if something else goes wrong.
+ //
+ // Issue diagnostics according to the diag_level as follows:
//
+ // 0 - Suppress all error messages except for underlying system errors.
+ // 1 - Suppress error messages about the reason why this is not a package.
+ // 2 - Suppress no error messages.
+ //
+ class not_package: public failed {};
+
package_manifest
pkg_verify (const common_options&,
const path& archive,
bool ignore_unknown,
+ bool ignore_toolchain,
bool expand_values,
- bool complete_depends = true,
- bool diag = true);
+ bool load_buildfiles,
+ bool complete_values = true,
+ int diag_level = 2);
// Similar to the above but verifies that a source directory is a valid
// package. Always translates the package version and completes dependency
@@ -39,10 +57,39 @@ namespace bpkg
// itself.
//
package_manifest
- pkg_verify (const dir_path& source,
+ pkg_verify (const common_options&,
+ const dir_path& source,
bool ignore_unknown,
+ bool ignore_toolchain,
+ bool load_buildfiles,
const function<package_manifest::translate_function>&,
- bool diag = true);
+ int diag_level = 2);
+
+ // Pre-parse the package manifest and return the name value pairs list,
+ // stripping the format version and the end-of-manifest/stream pairs,
+ // together with the build2/bpkg build-time dependencies, if present. If
+ // requested, verify that the package is compatible with the current
+ // toolchain and issue diagnostics and throw failed if it is not.
+ //
+ // Pass through the manifest_parsing and io_error exceptions, so that the
+ // caller can decide how to handle them (for example, ignore them if the
+ // manifest-printing process has failed, etc).
+ //
+ // To omit the package location from the diagnostics, pass an empty path as
+ // the what argument.
+ //
+ struct pkg_verify_result: vector<butl::manifest_name_value>
+ {
+ optional<dependency> build2_dependency;
+ optional<dependency> bpkg_dependency;
+ };
+
+ pkg_verify_result
+ pkg_verify (const common_options&,
+ butl::manifest_parser&,
+ bool ignore_toolchain,
+ const path& what,
+ int diag_level = 2);
}
#endif // BPKG_PKG_VERIFY_HXX
diff --git a/bpkg/pointer-traits.hxx b/bpkg/pointer-traits.hxx
new file mode 100644
index 0000000..a63b289
--- /dev/null
+++ b/bpkg/pointer-traits.hxx
@@ -0,0 +1,58 @@
+// file : bpkg/pointer-traits.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_POINTER_TRAITS_HXX
+#define BPKG_POINTER_TRAITS_HXX
+
+#include <bpkg/types.hxx>
+
+#include <odb/pointer-traits.hxx>
+
+namespace odb
+{
+ template <typename T>
+ class pointer_traits<bpkg::lazy_shared_ptr<T>>
+ {
+ public:
+ static const pointer_kind kind = pk_shared;
+ static const bool lazy = true;
+
+ typedef T element_type;
+ typedef bpkg::lazy_shared_ptr<element_type> pointer_type;
+ typedef bpkg::shared_ptr<element_type> eager_pointer_type;
+
+ static bool
+ null_ptr (const pointer_type& p)
+ {
+ return !p;
+ }
+
+ template <class O = T>
+ static typename object_traits<O>::id_type
+ object_id (const pointer_type& p)
+ {
+ return p.template object_id<O> ();
+ }
+ };
+
+ template <typename T>
+ class pointer_traits<bpkg::lazy_weak_ptr<T>>
+ {
+ public:
+ static const pointer_kind kind = pk_weak;
+ static const bool lazy = true;
+
+ typedef T element_type;
+ typedef bpkg::lazy_weak_ptr<element_type> pointer_type;
+ typedef bpkg::lazy_shared_ptr<element_type> strong_pointer_type;
+ typedef bpkg::weak_ptr<element_type> eager_pointer_type;
+
+ static strong_pointer_type
+ lock (const pointer_type& p)
+ {
+ return p.lock ();
+ }
+ };
+}
+
+#endif // BPKG_POINTER_TRAITS_HXX
diff --git a/bpkg/rep-add.cxx b/bpkg/rep-add.cxx
index 6856437..81b1286 100644
--- a/bpkg/rep-add.cxx
+++ b/bpkg/rep-add.cxx
@@ -16,12 +16,12 @@ namespace bpkg
{
shared_ptr<repository>
rep_add (const common_options& o,
- transaction& t,
+ database& db,
+ transaction&,
const repository_location& rl)
{
const string& rn (rl.canonical_name ());
- database& db (t.database ());
shared_ptr<repository> r (db.find<repository> (rn));
bool updated (false);
@@ -65,7 +65,7 @@ namespace bpkg
fail << "repository location argument expected" <<
info << "run 'bpkg help rep-add' for more information";
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
transaction t (db);
session s; // Repository dependencies can have cycles.
@@ -77,7 +77,7 @@ namespace bpkg
? optional<repository_type> (o.type ())
: nullopt));
- rep_add (o, t, rl);
+ rep_add (o, db, t, rl);
}
t.commit ();
diff --git a/bpkg/rep-add.hxx b/bpkg/rep-add.hxx
index 0062cdc..d5cec5d 100644
--- a/bpkg/rep-add.hxx
+++ b/bpkg/rep-add.hxx
@@ -22,7 +22,10 @@ namespace bpkg
// repository if it is not already.
//
shared_ptr<repository>
- rep_add (const common_options&, transaction&, const repository_location&);
+ rep_add (const common_options&,
+ database&,
+ transaction&,
+ const repository_location&);
}
#endif // BPKG_REP_ADD_HXX
diff --git a/bpkg/rep-create.cli b/bpkg/rep-create.cli
index be4cc42..0e44b68 100644
--- a/bpkg/rep-create.cli
+++ b/bpkg/rep-create.cli
@@ -33,7 +33,19 @@ namespace bpkg
bool --ignore-unknown
{
- "Ignore unknown manifest entries."
+ "Ignore unknown manifest entries. Note that this option also ignores the
+ version constraints in the special toolchain build-time dependencies."
+ }
+
+ butl::standard_version --min-bpkg-version
+ {
+ "<ver>",
+ "Apply backward compatibility workarounds to the generated
+ \cb{packages.manifest} file so that it can be consumed by \cb{bpkg}
versions greater than or equal to the specified version. If unspecified,
+ then the \cb{min-bpkg-version} value from the \cb{repositories.manifest}
+ file is used, if present. If the manifest value is not specified
+ either, then no backward compatibility workarounds are applied."
}
string --key
diff --git a/bpkg/rep-create.cxx b/bpkg/rep-create.cxx
index f47bc09..9b9bdeb 100644
--- a/bpkg/rep-create.cxx
+++ b/bpkg/rep-create.cxx
@@ -5,8 +5,8 @@
#include <map>
-#include <libbutl/filesystem.mxx> // dir_iterator
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/filesystem.hxx> // dir_iterator
+#include <libbutl/manifest-serializer.hxx>
#include <libbpkg/manifest.hxx>
#include <libbpkg/package-name.hxx>
@@ -24,7 +24,7 @@ using namespace butl;
namespace bpkg
{
- struct package_key
+ struct package_name_version
{
package_name name;
bpkg::version version;
@@ -34,7 +34,7 @@ namespace bpkg
// revision.
//
bool
- operator< (const package_key& y) const
+ operator< (const package_name_version& y) const
{
int r (name.compare (y.name));
return r < 0 || (r == 0 && version.compare (y.version, true) < 0);
@@ -47,7 +47,7 @@ namespace bpkg
package_manifest manifest;
};
- using package_map = map<package_key, package_data>;
+ using package_map = map<package_name_version, package_data>;
static void
collect (const rep_create_options& o,
@@ -58,7 +58,7 @@ namespace bpkg
{
tracer trace ("collect");
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
path p (de.path ());
@@ -98,11 +98,16 @@ namespace bpkg
path a (d / p);
package_manifest m (
- pkg_verify (o, a, o.ignore_unknown (), true /* expand_values */));
+ pkg_verify (o,
+ a,
+ o.ignore_unknown (),
+ o.ignore_unknown () /* ignore_toolchain */,
+ true /* expand_values */,
+ true /* load_buildfiles */));
// Calculate its checksum.
//
- m.sha256sum = sha256 (o, a);
+ m.sha256sum = sha256sum (o, a);
l4 ([&]{trace << m.name << " " << m.version << " in " << a
<< " sha256sum " << *m.sha256sum;});
@@ -111,7 +116,7 @@ namespace bpkg
//
m.location = a.leaf (root);
- package_key k {m.name, m.version}; // Argument evaluation order.
+ package_name_version k {m.name, m.version}; // Argument evaluation order.
auto r (map.emplace (move (k), package_data {a, move (m)}));
// Diagnose duplicates.
@@ -162,6 +167,15 @@ namespace bpkg
})
<< " prerequisite repository(s)";});
+ optional<standard_version> rmv (
+ rms.header && rms.header->min_bpkg_version
+ ? rms.header->min_bpkg_version
+ : nullopt);
+
+ optional<standard_version> opv (o.min_bpkg_version_specified ()
+ ? o.min_bpkg_version ()
+ : optional<standard_version> ());
+
// While we could have serialized as we go along, the order of
// packages will be pretty much random and not reproducible. By
// collecting all the manifests in a map we get a sorted list.
@@ -170,7 +184,7 @@ namespace bpkg
collect (o, pm, d, d);
pkg_package_manifests manifests;
- manifests.sha256sum = sha256 (o, path (d / repositories_file));
+ manifests.sha256sum = sha256sum (o, path (d / repositories_file));
for (auto& p: pm)
{
@@ -182,6 +196,16 @@ namespace bpkg
manifests.emplace_back (move (m));
}
+ // Issue a warning if --min-bpkg-version option and the repositories
+ // manifest's min-bpkg-version value are both specified and don't match.
+ // Let's issue it after the added repositories are printed to stdout, so
+ // that it doesn't go unnoticed.
+ //
+ if (opv && rmv && *opv != *rmv)
+ warn << "--min-bpkg-version option value " << *opv << " differs from "
+ << "minimum bpkg version " << *rmv << " specified in "
+ << d / repositories_file;
+
// Serialize packages manifest, optionally generate the signature manifest.
//
path p (d / packages_file);
@@ -197,7 +221,7 @@ namespace bpkg
ofdstream ofs (p, fdopen_mode::binary);
manifest_serializer s (ofs, p.string ());
- manifests.serialize (s);
+ manifests.serialize (s, opv ? opv : rmv);
ofs.close ();
}
@@ -212,7 +236,7 @@ namespace bpkg
info << "run 'bpkg help rep-create' for more information";
signature_manifest m;
- m.sha256sum = sha256 (o, p);
+ m.sha256sum = sha256sum (o, p);
m.signature = sign_repository (o, m.sha256sum, key, *cert, d);
p = path (d / signature_file);
diff --git a/bpkg/rep-fetch.cxx b/bpkg/rep-fetch.cxx
index 3239421..d02a064 100644
--- a/bpkg/rep-fetch.cxx
+++ b/bpkg/rep-fetch.cxx
@@ -6,7 +6,7 @@
#include <map>
#include <set>
-#include <libbutl/manifest-parser.mxx>
+#include <libbutl/manifest-parser.hxx>
#include <bpkg/auth.hxx>
#include <bpkg/fetch.hxx>
@@ -15,7 +15,10 @@
#include <bpkg/package-odb.hxx>
#include <bpkg/database.hxx>
#include <bpkg/rep-remove.hxx>
+#include <bpkg/pkg-verify.hxx>
#include <bpkg/diagnostics.hxx>
+#include <bpkg/satisfaction.hxx>
+#include <bpkg/package-query.hxx>
#include <bpkg/manifest-utility.hxx>
using namespace std;
@@ -49,9 +52,11 @@ namespace bpkg
static rep_fetch_data
rep_fetch_pkg (const common_options& co,
const dir_path* conf,
+ database* db,
const repository_location& rl,
const optional<string>& dependent_trust,
- bool ignore_unknown)
+ bool ignore_unknown,
+ bool ignore_toolchain)
{
// First fetch the repositories list and authenticate the base's
// certificate.
@@ -71,7 +76,7 @@ namespace bpkg
if (a)
{
cert = authenticate_certificate (
- co, conf, cert_pem, rl, dependent_trust);
+ co, conf, db, cert_pem, rl, dependent_trust);
a = !cert->dummy ();
}
@@ -113,6 +118,18 @@ namespace bpkg
authenticate_repository (co, conf, cert_pem, *cert, sm, rl);
}
+ // If requested, verify that the packages are compatible with the current
+ // toolchain.
+ //
+ if (!ignore_toolchain)
+ {
+ for (const package_manifest& m: fr.packages)
+ {
+ for (const dependency_alternatives& das: m.dependencies)
+ toolchain_buildtime_dependency (co, das, &m.name);
+ }
+ }
+
return rep_fetch_data {{move (fr)}, move (cert_pem), move (cert)};
}
@@ -160,7 +177,8 @@ namespace bpkg
M r;
if (exists (f))
r = parse_manifest<M> (f, iu, rl, fragment);
- else
+
+ if (r.empty ())
r.emplace_back (repository_manifest ()); // Add the base repository.
return r;
@@ -189,81 +207,194 @@ namespace bpkg
return r;
}
+ static void
+ print_package_info (diag_record& dr,
+ const dir_path& pl,
+ const repository_location& rl,
+ const optional<string>& fragment)
+ {
+ dr << "package ";
+
+ if (!pl.current ())
+ dr << "'" << pl.string () << "' "; // Strip trailing '/'.
+
+ dr << "in repository " << rl;
+
+ if (fragment)
+ dr << ' ' << *fragment;
+ }
+
// Parse package manifests referenced by the package directory manifests.
//
- static vector<package_manifest>
+ static pair<vector<package_manifest>, vector<package_info>>
parse_package_manifests (const common_options& co,
const dir_path& repo_dir,
- vector<package_manifest>&& sms,
+ vector<package_manifest>&& pms,
bool iu,
+ bool it,
const repository_location& rl,
const optional<string>& fragment) // For diagnostics.
{
- vector<package_manifest> r;
- r.reserve (sms.size ());
+ auto prn_package_info = [&rl, &fragment] (diag_record& dr,
+ const package_manifest& pm)
+ {
+ print_package_info (dr,
+ path_cast<dir_path> (*pm.location),
+ rl,
+ fragment);
+ };
- for (package_manifest& sm: sms)
+ // Verify that all the package directories contain the package manifest
+ // files and retrieve the package versions via the single `b info` call,
+ // but only if the current build2 version is satisfactory for all the
+ // repository packages. While at it cache the manifest paths for the
+ // future use.
+ //
+ // Note that if the package is not compatible with the toolchain, not to
+ // end up with an unfriendly build2 error message (referring a line in the
+ // bootstrap file issued by the version module), we need to verify the
+ // compatibility of the package manifests prior to calling `b info`. Also
+ // note that we cannot create the package manifest objects at this stage,
+ // since we need the package versions for that. Thus, we cache the
+ // respective name value lists instead.
+ //
+ optional<package_version_infos> pvs;
+ paths mfs;
+ vector<vector<manifest_name_value>> nvs;
{
- assert (sm.location);
+ mfs.reserve (pms.size ());
+ nvs.reserve (pms.size ());
- auto package_info = [&sm, &rl, &fragment] (diag_record& dr)
+ dir_paths pds;
+ pds.reserve (pms.size ());
+
+ // If true, then build2 version is satisfactory for all the repository
+ // packages.
+ //
+ bool bs (true);
+
+ for (const package_manifest& pm: pms)
{
- dr << "package ";
+ assert (pm.location);
- if (!sm.location->current ())
- dr << "'" << sm.location->string () << "' "; // Strip trailing '/'.
+ dir_path d (repo_dir / path_cast<dir_path> (*pm.location));
+ d.normalize (); // In case location is './'.
- dr << "in repository " << rl;
+ path f (d / manifest_file);
+ if (!exists (f))
+ {
+ diag_record dr (fail);
+ dr << "no manifest file for ";
+ prn_package_info (dr, pm);
+ }
- if (fragment)
- dr << ' ' << *fragment;
- };
+ // Provide the context if the package compatibility verification fails.
+ //
+ auto g (
+ make_exception_guard (
+ [&pm, &prn_package_info] ()
+ {
+ diag_record dr (info);
- auto failure = [&package_info] (const char* desc)
- {
- diag_record dr (fail);
- dr << desc << " for ";
- package_info (dr);
- };
+ dr << "while retrieving information for ";
+ prn_package_info (dr, pm);
+ }));
+
+ try
+ {
+ ifdstream ifs (f);
+ manifest_parser mp (ifs, f.string ());
- dir_path d (repo_dir / path_cast<dir_path> (*sm.location));
- d.normalize (); // In case location is './'.
+ // Note that the package directory points to something temporary
+ // (e.g., .bpkg/tmp/6f746365314d/) and it's probably better to omit
+ // it entirely (the above exception guard will print all we've got).
+ //
+ pkg_verify_result r (pkg_verify (co, mp, it, dir_path ()));
+
+ if (bs &&
+ r.build2_dependency &&
+ !satisfy_build2 (co, *r.build2_dependency))
+ {
+ bs = false;
+ pds.clear (); // Won't now be used.
+ }
+
+ nvs.push_back (move (r));
+ }
+ catch (const manifest_parsing& e)
+ {
+ fail (e.name, e.line, e.column) << e.description;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e;
+ }
- path f (d / manifest_file);
- if (!exists (f))
- failure ("no manifest file");
+ mfs.push_back (move (f));
+
+ if (bs)
+ pds.push_back (move (d));
+ }
+
+ // Note that for the directory-based repositories we also query
+ // subprojects since the package information will be used for the
+ // subsequent package_iteration() call (see below).
+ //
+ if (bs)
+ pvs = package_versions (co, pds,
+ (rl.directory_based ()
+ ? b_info_flags::subprojects
+ : b_info_flags::none));
+ }
+
+ // Parse package manifests, fixing up their versions.
+ //
+ pair<vector<package_manifest>, vector<package_info>> r;
+ r.first.reserve (pms.size ());
+
+ if (pvs)
+ r.second.reserve (pms.size ());
+
+ for (size_t i (0); i != pms.size (); ++i)
+ {
+ package_manifest& pm (pms[i]);
+
+ assert (pm.location);
try
{
- ifdstream ifs (f);
- manifest_parser mp (ifs, f.string ());
-
package_manifest m (
- mp,
- [&co, &d] (version& v)
+ mfs[i].string (),
+ move (nvs[i]),
+ [&pvs, i] (version& v)
{
- if (optional<version> pv = package_version (co, d))
- v = move (*pv);
+ if (pvs)
+ {
+ optional<version>& pv ((*pvs)[i].version);
+
+ if (pv)
+ v = move (*pv);
+ }
},
iu);
// Save the package manifest, preserving its location.
//
- m.location = move (*sm.location);
- sm = move (m);
+ m.location = move (*pm.location);
+
+ pm = move (m);
}
catch (const manifest_parsing& e)
{
diag_record dr (fail (e.name, e.line, e.column));
dr << e.description << info;
- package_info (dr);
- }
- catch (const io_error& e)
- {
- fail << "unable to read from " << f << ": " << e;
+ prn_package_info (dr, pm);
}
- r.emplace_back (move (sm));
+ r.first.push_back (move (pm));
+
+ if (pvs)
+ r.second.push_back (move ((*pvs)[i].info));
}
return r;
@@ -287,11 +418,11 @@ namespace bpkg
ifdstream is (fp);
string s (is.read_text ());
- if (s.empty ())
+ if (s.empty () && name != "build-file")
fail << name << " manifest value in " << pkg / manifest_file
<< " references empty file " << rp <<
info << "repository " << rl
- << (!fragment.empty () ? " " + fragment : "");
+ << (!fragment.empty () ? ' ' + fragment : "");
return s;
}
@@ -301,7 +432,7 @@ namespace bpkg
<< name << " manifest value in " << pkg / manifest_file << ": "
<< e <<
info << "repository " << rl
- << (!fragment.empty () ? " " + fragment : "") << endf;
+ << (!fragment.empty () ? ' ' + fragment : "") << endf;
}
}
@@ -309,7 +440,9 @@ namespace bpkg
rep_fetch_dir (const common_options& co,
const repository_location& rl,
bool iu,
- bool ev)
+ bool it,
+ bool ev,
+ bool lb)
{
assert (rl.absolute ());
@@ -330,29 +463,76 @@ namespace bpkg
rl,
string () /* fragment */));
- fr.packages = parse_package_manifests (co,
- rd,
- move (pms),
- iu,
- rl,
- empty_string /* fragment */);
+ pair<vector<package_manifest>, vector<package_info>> pmi (
+ parse_package_manifests (co,
+ rd,
+ move (pms),
+ iu,
+ it,
+ rl,
+ empty_string /* fragment */));
+
+ fr.packages = move (pmi.first);
+ fr.package_infos = move (pmi.second);
- // Expand file-referencing package manifest values.
+ // If requested, expand file-referencing package manifest values and load
+ // the buildfiles into the respective *-build values.
//
- if (ev)
+ if (ev || lb)
{
for (package_manifest& m: fr.packages)
- m.load_files (
- [&m, &rd, &rl] (const string& n, const path& p)
- {
- return read_package_file (p,
- n,
- path_cast<dir_path> (*m.location),
- rd,
- rl,
- empty_string /* fragment */);
- },
- iu);
+ {
+ dir_path pl (path_cast<dir_path> (*m.location));
+
+ // Load *-file values.
+ //
+ try
+ {
+ m.load_files (
+ [ev, &rd, &rl, &pl]
+ (const string& n, const path& p) -> optional<string>
+ {
+ // Always expand the build-file values.
+ //
+ if (ev || n == "build-file")
+ {
+ return read_package_file (p,
+ n,
+ pl,
+ rd,
+ rl,
+ empty_string /* fragment */);
+ }
+ else
+ return nullopt;
+ },
+ iu);
+ }
+ catch (const manifest_parsing& e)
+ {
+ diag_record dr (fail);
+ dr << e << info;
+ print_package_info (dr, pl, rl, nullopt /* fragment */);
+ dr << endf;
+ }
+
+ // Load the bootstrap, root, and config/*.build buildfiles into the
+ // respective *-build values, if requested and if they are not already
+ // specified in the manifest.
+ //
+ if (lb)
+ try
+ {
+ load_package_buildfiles (m, rd / pl, true /* err_path_relative */);
+ }
+ catch (const runtime_error& e)
+ {
+ diag_record dr (fail);
+ dr << e << info;
+ print_package_info (dr, pl, rl, nullopt /* fragment */);
+ dr << endf;
+ }
+ }
}
return rep_fetch_data {{move (fr)},
@@ -365,16 +545,16 @@ namespace bpkg
const dir_path* conf,
const repository_location& rl,
bool iu,
- bool ev)
+ bool it,
+ bool ev,
+ bool lb)
{
- if (conf != nullptr && conf->empty ())
- conf = exists (bpkg_dir) ? &current_dir : nullptr;
-
- assert (conf == nullptr || !conf->empty ());
+ auto i (tmp_dirs.find (conf != nullptr ? *conf : empty_dir_path));
+ assert (i != tmp_dirs.end ());
dir_path sd (repository_state (rl));
- auto_rmdir rm (temp_dir / sd);
+ auto_rmdir rm (i->second / sd, !keep_tmp);
const dir_path& td (rm.path);
if (exists (td))
@@ -490,42 +670,91 @@ namespace bpkg
// Parse package manifests.
//
- fr.packages = parse_package_manifests (co,
- td,
- move (pms),
- iu,
- rl,
- fr.friendly_name);
-
- // Expand file-referencing package manifest values checking out
- // submodules, if required.
+ pair<vector<package_manifest>, vector<package_info>> pmi (
+ parse_package_manifests (co,
+ td,
+ move (pms),
+ iu,
+ it,
+ rl,
+ fr.friendly_name));
+
+ fr.packages = move (pmi.first);
+ fr.package_infos = move (pmi.second);
+
+ // If requested, expand file-referencing package manifest values
+ // checking out submodules, if required, and load the buildfiles into
+ // the respective *-build values.
//
- if (ev)
+ if (ev || lb)
{
for (package_manifest& m: fr.packages)
- m.load_files (
- [&m, &td, &rl, &fr, &checkout_submodules] (const string& n,
- const path& p)
- {
- // Note that this doesn't work for symlinks on Windows where git
- // normally creates filesystem-agnostic symlinks that are
- // indistinguishable from regular files (see fixup_worktree()
- // for details). It seems like the only way to deal with that is
- // to unconditionally checkout submodules on Windows. Let's not
- // pessimize things for now (if someone really wants this to
- // work, they can always enable real symlinks in git).
- //
- if (!exists (td / *m.location / p))
- checkout_submodules ();
-
- return read_package_file (p,
- n,
- path_cast<dir_path> (*m.location),
- td,
- rl,
- fr.friendly_name);
- },
- iu);
+ {
+ dir_path pl (path_cast<dir_path> (*m.location));
+
+ // Load *-file values.
+ //
+ try
+ {
+ m.load_files (
+ [ev, &td, &rl, &pl, &fr, &checkout_submodules]
+ (const string& n, const path& p) -> optional<string>
+ {
+ // Always expand the build-file values.
+ //
+ if (ev || n == "build-file")
+ {
+ // Check out submodules if the referenced file doesn't exist.
+ //
+ // Note that this doesn't work for symlinks on Windows where
+ // git normally creates filesystem-agnostic symlinks that
+ // are indistinguishable from regular files (see
+ // fixup_worktree() for details). It seems like the only way
+ // to deal with that is to unconditionally checkout
+ // submodules on Windows. Let's not pessimize things for now
+ // (if someone really wants this to work, they can always
+ // enable real symlinks in git).
+ //
+ if (!exists (td / pl / p))
+ checkout_submodules ();
+
+ return read_package_file (p,
+ n,
+ pl,
+ td,
+ rl,
+ fr.friendly_name);
+ }
+ else
+ return nullopt;
+ },
+ iu);
+ }
+ catch (const manifest_parsing& e)
+ {
+ diag_record dr (fail);
+ dr << e << info;
+ print_package_info (dr, pl, rl, fr.friendly_name);
+ dr << endf;
+ }
+
+ // Load the bootstrap, root, and config/*.build buildfiles into the
+ // respective *-build values, if requested and if they are not
+ // already specified in the manifest.
+ //
+ if (lb)
+ try
+ {
+ load_package_buildfiles (m, td / pl, true /* err_path_relative */);
+ }
+ catch (const runtime_error& e)
+ {
+ diag_record dr (fail);
+ dr << e << info;
+ print_package_info (dr, pl, rl, fr.friendly_name);
+ dr << endf;
+ }
+ }
}
np += fr.packages.size ();
@@ -556,16 +785,28 @@ namespace bpkg
static rep_fetch_data
rep_fetch (const common_options& co,
const dir_path* conf,
+ database* db,
const repository_location& rl,
const optional<string>& dt,
bool iu,
- bool ev)
+ bool it,
+ bool ev,
+ bool lb)
{
switch (rl.type ())
{
- case repository_type::pkg: return rep_fetch_pkg (co, conf, rl, dt, iu);
- case repository_type::dir: return rep_fetch_dir (co, rl, iu, ev);
- case repository_type::git: return rep_fetch_git (co, conf, rl, iu, ev);
+ case repository_type::pkg:
+ {
+ return rep_fetch_pkg (co, conf, db, rl, dt, iu, it);
+ }
+ case repository_type::dir:
+ {
+ return rep_fetch_dir (co, rl, iu, it, ev, lb);
+ }
+ case repository_type::git:
+ {
+ return rep_fetch_git (co, conf, rl, iu, it, ev, lb);
+ }
}
assert (false); // Can't be here.
@@ -577,9 +818,19 @@ namespace bpkg
const dir_path* conf,
const repository_location& rl,
bool iu,
- bool ev)
+ bool it,
+ bool ev,
+ bool lb)
{
- return rep_fetch (co, conf, rl, nullopt /* dependent_trust */, iu, ev);
+ return rep_fetch (co,
+ conf,
+ nullptr /* database */,
+ rl,
+ nullopt /* dependent_trust */,
+ iu,
+ it,
+ ev,
+ lb);
}
// Return an existing repository fragment or create a new one. Update the
@@ -591,7 +842,7 @@ namespace bpkg
static shared_ptr<repository_fragment>
rep_fragment (const common_options& co,
- const dir_path& conf,
+ database& db,
transaction& t,
const repository_location& rl,
rep_fetch_data::fragment&& fr,
@@ -601,7 +852,6 @@ namespace bpkg
{
tracer trace ("rep_fragment");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Calculate the fragment location.
@@ -852,10 +1102,15 @@ namespace bpkg
// details).
//
if (exists && !full_fetch)
- rep_remove_package_locations (t, rf->name);
+ rep_remove_package_locations (db, t, rf->name);
+
+ vector<package_manifest>& pms (fr.packages);
+ const vector<package_info>& pis (fr.package_infos);
- for (package_manifest& pm: fr.packages)
+ for (size_t i (0); i != pms.size (); ++i)
{
+ package_manifest& pm (pms[i]);
+
// Fix-up the external package version iteration number.
//
if (rl.directory_based ())
@@ -868,12 +1123,13 @@ namespace bpkg
optional<version> v (
package_iteration (
co,
- conf,
+ db,
t,
path_cast<dir_path> (rl.path () / *pm.location),
pm.name,
pm.version,
- false /* check_external */));
+ !pis.empty () ? &pis[i] : nullptr,
+ false /* check_external */));
if (v)
pm.version = move (*v);
@@ -956,7 +1212,7 @@ namespace bpkg
//
static void
rep_fetch (const common_options& co,
- const dir_path& conf,
+ database& db,
transaction& t,
const shared_ptr<repository>& r,
const optional<string>& dependent_trust,
@@ -970,7 +1226,6 @@ namespace bpkg
{
tracer trace ("rep_fetch(rep)");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Check that the repository is not fetched yet and register it as fetched
@@ -990,7 +1245,8 @@ namespace bpkg
//
if (need_auth (co, r->location))
authenticate_certificate (co,
- &conf,
+ &db.config_orig,
+ &db,
r->certificate,
r->location,
dependent_trust);
@@ -1057,13 +1313,22 @@ namespace bpkg
// repository fragments list, as well as its prerequisite and complement
// repository sets.
//
+ // Note that we do this in the forward compatible manner ignoring
+ // unrecognized manifest values and unsatisfied build2 toolchain
+ // constraints in the package manifests. This approach allows older
+ // toolchains to work with newer repositories, successfully building the
+ // toolchain-satisfied packages and only failing for unsatisfied ones.
+ //
rep_fetch_data rfd (
rep_fetch (co,
- &conf,
+ &db.config_orig,
+ &db,
rl,
dependent_trust,
true /* ignore_unknow */,
- false /* expand_values */));
+ true /* ignore_toolchain */,
+ false /* expand_values */,
+ true /* load_buildfiles */));
// Save for subsequent certificate authentication for repository use by
// its dependents.
@@ -1079,7 +1344,7 @@ namespace bpkg
string nm (fr.friendly_name); // Don't move, still may be used.
shared_ptr<repository_fragment> rf (rep_fragment (co,
- conf,
+ db,
t,
rl,
move (fr),
@@ -1156,7 +1421,7 @@ namespace bpkg
rm (pr);
auto fetch = [&co,
- &conf,
+ &db,
&t,
&fetched_repositories,
&removed_repositories,
@@ -1171,7 +1436,7 @@ namespace bpkg
assert (i != repo_trust.end ());
rep_fetch (co,
- conf,
+ db,
t,
r,
i->second,
@@ -1206,7 +1471,7 @@ namespace bpkg
static void
rep_fetch (const common_options& o,
- const dir_path& conf,
+ database& db,
transaction& t,
const vector<lazy_shared_ptr<repository>>& repos,
bool shallow,
@@ -1215,7 +1480,6 @@ namespace bpkg
{
tracer trace ("rep_fetch(repos)");
- database& db (t.database ());
tracer_guard tg (db, trace);
// As a fist step we fetch repositories recursively building the list of
@@ -1243,7 +1507,7 @@ namespace bpkg
//
for (const lazy_shared_ptr<repository>& r: repos)
rep_fetch (o,
- conf,
+ db,
t,
r.load (),
nullopt /* dependent_trust */,
@@ -1258,7 +1522,14 @@ namespace bpkg
// Remove dangling repositories.
//
for (const shared_ptr<repository>& r: removed_repositories)
- rep_remove (conf, t, r);
+ {
+ // Prior to removing the repository we need to make sure it still
+ // exists, which may not be the case due to earlier removal of the
+ // dependent dangling repository.
+ //
+ if (db.find<repository> (r->name) != nullptr)
+ rep_remove (db, t, r);
+ }
// Remove dangling repository fragments.
//
@@ -1277,7 +1548,7 @@ namespace bpkg
//
assert (f == rf);
- rep_remove_fragment (conf, t, rf);
+ rep_remove_fragment (db, t, rf);
}
}
@@ -1346,12 +1617,17 @@ namespace bpkg
{
dependencies& ds (at.package->dependencies);
- // Note that the special test dependencies entry is always the last
- // one, if present.
+ // Note that there is only one special test dependencies entry in
+ // the test package.
//
- assert (!ds.empty () && ds.back ().type);
-
- ds.pop_back ();
+ for (auto i (ds.begin ()), e (ds.end ()); i != e; ++i)
+ {
+ if (i->type)
+ {
+ ds.erase (i);
+ break;
+ }
+ }
db.update (at.package);
}
@@ -1360,17 +1636,37 @@ namespace bpkg
// Go through the available packages that have external tests and add
// them as the special test dependencies to these test packages.
//
+ // Note that not being able to resolve the test package for a main
+ // package is not an error, since the test package absence doesn't
+ // affect the main package building and internal testing. Dropping of an
+ // external test package from a repository may, however, be intentional.
+ // Think of a private repository crafted as a subset of some public
+ // repository with the external examples packages omitted.
+ //
for (const auto& am: db.query<available_main> ())
{
const shared_ptr<available_package>& p (am.package);
+ const package_name& n (p->id.name);
+ const version& v (p->version);
vector<shared_ptr<repository_fragment>> rfs;
for (const package_location& pl: p->locations)
rfs.push_back (pl.repository_fragment.load ());
+ bool module (build2_module (n));
+
for (const test_dependency& td: p->tests)
{
+ // Verify that the package has no runtime tests if it is a build
+ // system module.
+ //
+ if (module && !td.buildtime)
+ fail << "run-time " << td.type << ' ' << td.name << " for build "
+ << "system module "
+ << package_string (n, v) <<
+ info << "build system modules cannot have run-time " << td.type;
+
vector<pair<shared_ptr<available_package>,
shared_ptr<repository_fragment>>> tps (
filter (rfs,
@@ -1386,11 +1682,122 @@ namespace bpkg
dependencies& ds (tp->dependencies);
- if (ds.empty () || !ds.back ().type)
- ds.push_back (dependency_alternatives_ex (td.type));
+ // Find the special test dependencies entry, if already present.
+ //
+ auto b (ds.begin ());
+ auto e (ds.end ());
+ auto oi (b); // Old entry location.
+ for (; oi != e && !oi->type; ++oi) ;
+
+ // Note that since we store all the primary packages as
+ // alternative dependencies (which must be all of the same
+ // dependency type) for the test package, it must either be a
+ // runtime or build-time dependency for all of them.
+ //
+ // Note that the test package alternative dependencies contain the
+ // `== <version>` constraints (see below), so we can use min
+ // version of such a constraint as the primary package version.
+ //
+ if (oi != e && oi->buildtime != td.buildtime)
+ {
+ dependency_alternatives_ex& das (*oi);
+ assert (!das.empty ()); // Cannot be empty if present.
+
+ const dependency_alternative& da (das[0]);
+
+ // We always add the primary package to the test package as a
+ // single-dependency alternative (see below).
+ //
+ assert (da.size () == 1);
+
+ fail << to_string (td.type) << " package " << td.name << " is a "
+ << "build-time dependency for one primary package and a "
+ << "run-time for another" <<
+ info << (das.buildtime ? "build-time for " : "run-time for ")
+ << package_string (da[0].name,
+ *da[0].constraint->min_version) <<
+ info << (td.buildtime ? "build-time for " : "run-time for ")
+ << package_string (n, v);
+ }
+
+ // Find the (new) location for the special test dependencies entry.
+ //
+ // Note that if the entry is already present, it can only be moved
+ // towards the end of the list.
+ //
+ auto ni (e);
+
+ // First, find the last depends clause that explicitly specifies
+ // this main package but goes after the special entry current
+ // location, if present. Note that we only consider clauses with
+ // the matching buildtime flag.
+ //
+ for (auto i (oi != e ? oi + 1 : b); i != e; ++i)
+ {
+ const dependency_alternatives_ex& das (*i);
+ if (das.buildtime == td.buildtime)
+ {
+ bool specifies (false);
+
+ for (const dependency_alternative& da: das)
+ {
+ for (const dependency& d: da)
+ {
+ if (d.name == n)
+ {
+ specifies = true;
+ break;
+ }
+ }
+
+ if (specifies)
+ break;
+ }
+
+ if (specifies)
+ ni = i;
+ }
+ }
+
+ // Now, set ni to refer to the special test dependencies entry,
+ // moving or creating one, if required.
+ //
+ if (oi != e) // The entry already exists?
+ {
+ if (ni != e) // Move the entry to the new location?
+ {
+ // Move the [oi + 1, ni] range 1 position to the left and
+ // move the *oi element to the now vacant ni slot.
+ //
+ rotate (oi, oi + 1, ni + 1);
+ }
+ else
+ ni = oi; // Leave the entry at the old location.
+ }
+ else // The entry doesn't exist.
+ {
+ if (ni != e) // Create the entry right after ni?
+ ++ni;
+ else
+ ni = b; // Create the entry at the beginning of the list.
+
+ ni = ds.emplace (ni, td.type, td.buildtime); // Create the entry.
+ }
+
+ // Finally, add the new dependency alternative to the special
+ // entry.
+ //
+ dependency_alternative da (td.enable,
+ td.reflect,
+ nullopt /* prefer */,
+ nullopt /* accept */,
+ nullopt /* require */);
- ds.back ().push_back (
- dependency {p->id.name, version_constraint (p->version)});
+ da.push_back (dependency {n, version_constraint (v)});
+
+ assert (ni != ds.end ()); // Must be deduced by now.
+
+ ni->push_back (move (da));
db.update (tp);
}
@@ -1409,7 +1816,7 @@ namespace bpkg
warn << "repository state is now broken and will be cleaned up" <<
info << "run 'bpkg rep-fetch' to update";
- rep_remove_clean (o, conf, t.database ());
+ rep_remove_clean (o, db);
}
throw;
@@ -1418,7 +1825,6 @@ namespace bpkg
void
rep_fetch (const common_options& o,
- const dir_path& conf,
database& db,
const vector<repository_location>& rls,
bool shallow,
@@ -1444,13 +1850,17 @@ namespace bpkg
// Add the repository, unless it is already a top-level one and has the
// same location.
//
+ // Note that on Windows we can overwrite the local repository location
+ // with the same location but some characters specified in a different
+ // case, which is ok.
+ //
if (ua.find (r) == ua.end () || r.load ()->location.url () != rl.url ())
- rep_add (o, t, rl);
+ rep_add (o, db, t, rl);
repos.emplace_back (r);
}
- rep_fetch (o, conf, t, repos, shallow, false /* full_fetch */, reason);
+ rep_fetch (o, db, t, repos, shallow, false /* full_fetch */, reason);
t.commit ();
}
@@ -1467,7 +1877,11 @@ namespace bpkg
//
vector<lazy_shared_ptr<repository>> repos;
- database db (open (c, trace));
+ // Pre-attach the explicitly linked databases since we call
+ // package_iteration().
+ //
+ database db (c, trace, true /* pre_attach */);
+
transaction t (db);
session s; // Repository dependencies can have cycles.
@@ -1531,7 +1945,7 @@ namespace bpkg
//
auto i (ua.find (r));
if (i == ua.end () || i->load ()->location.url () != rl.url ())
- r = lazy_shared_ptr<repository> (db, rep_add (o, t, rl));
+ r = lazy_shared_ptr<repository> (db, rep_add (o, db, t, rl));
}
repos.emplace_back (move (r));
@@ -1558,7 +1972,7 @@ namespace bpkg
}
}
- rep_fetch (o, c, t, repos, o.shallow (), full_fetch, reason);
+ rep_fetch (o, db, t, repos, o.shallow (), full_fetch, reason);
size_t rcount (0), pcount (0);
if (verb)
diff --git a/bpkg/rep-fetch.hxx b/bpkg/rep-fetch.hxx
index 4ddce5b..a26eba3 100644
--- a/bpkg/rep-fetch.hxx
+++ b/bpkg/rep-fetch.hxx
@@ -7,7 +7,6 @@
#include <libbpkg/manifest.hxx>
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // database
#include <bpkg/utility.hxx>
#include <bpkg/rep-fetch-options.hxx>
@@ -19,9 +18,8 @@ namespace bpkg
// Fetch and authenticate repositories and packages manifests.
//
- // If conf is NULL, then assume not running in a bpkg configuration. If it
- // is empty, then check if the bpkg configuration exists in the current
- // working directory.
+ // If configuration directory is NULL, then assume not running in a bpkg
+ // configuration.
//
class certificate;
@@ -36,6 +34,13 @@ namespace bpkg
vector<repository_manifest> repositories;
vector<package_manifest> packages;
+
+ // Empty if the build2 project info is not available for the packages.
+ // Currently we only retrieve it for the directory and version control
+ // based repositories, but only if the current build2 version is
+ // satisfactory for all the repository packages.
+ //
+ vector<package_info> package_infos;
};
vector<fragment> fragments;
@@ -46,18 +51,24 @@ namespace bpkg
shared_ptr<const bpkg::certificate> certificate; // Authenticated.
};
- // If requested, expand the file-referencing package manifest values
- // (description, changes, etc), setting them to the contents of files they
- // refer to and set the potentially absent description-type value to the
- // effective description type (see libbpkg/manifest.hxx). Note that for pkg
- // repositories such values are expanded at the repository creation time.
+ // If requested, verify that all manifest entries are recognized and the
+ // packages are compatible with the current toolchain. Also, if requested,
+ // expand the file-referencing package manifest values (description,
+ // changes, etc), setting them to the contents of files they refer to and
+ // set the potentially absent description-type value to the effective
+ // description type (see libbpkg/manifest.hxx) and load the bootstrap, root,
+ // and config/*.build buildfiles into the respective *-build values. Note
+ // that for pkg repositories such values are expanded/loaded at the
+ // repository creation time.
//
rep_fetch_data
rep_fetch (const common_options&,
- const dir_path* conf,
+ const dir_path* configuration,
const repository_location&,
bool ignore_unknown,
- bool expand_values);
+ bool ignore_toolchain,
+ bool expand_values,
+ bool load_buildfiles);
// Add (or update) repository locations to the configuration and fetch
// them. If shallow is true, then don't fetch their prerequisite and/or
@@ -69,7 +80,6 @@ namespace bpkg
//
void
rep_fetch (const common_options&,
- const dir_path& conf,
database&,
const vector<repository_location>&,
bool shallow,
diff --git a/bpkg/rep-info.cli b/bpkg/rep-info.cli
index 60854ce..c18831f 100644
--- a/bpkg/rep-info.cli
+++ b/bpkg/rep-info.cli
@@ -89,15 +89,26 @@ namespace bpkg
"Instead of printing the information in the human-readable form, dump it
(to \cb{stdout}) as manifest(s). Normally you would use this option in
combination with \cb{--packages|-p} or \cb{--repositories|-r} to only
- dump one of the manifests. If the \cb{--deep} option is specified,
- then in the resulting packages manifest the \cb{*-file} values are
- replaced with the contents of the referenced files. See also
- \cb{--repositories-file} and \cb{--packages-file}."
+ dump one of the manifests. If the \cb{--deep} option is specified, then
+ in the resulting packages manifest the \cb{*-file} values are replaced
+ with the contents of the referenced files and the \c{*-build} values
+ are automatically added (unless the corresponding files are
+ absent). See also \cb{--ignore-unknown}, \cb{--repositories-file}, and
+ \cb{--packages-file}."
+ }
+
+ bool --ignore-unknown
+ {
+ "Ignore unknown manifest entries. Note that this option also ignores
+ the version constraints in the special toolchain build-time
+ dependencies. This option is implied if \cb{--manifest} is not
+ specified."
}
bool --deep
{
- "Verify files referenced by the \cb{*-file} manifest values."
+ "Verify the presence of the required \c{*-build} values/files and
+ the validity of files referenced by the \cb{*-file} manifest values."
}
path --repositories-file
diff --git a/bpkg/rep-info.cxx b/bpkg/rep-info.cxx
index 1a23528..190a210 100644
--- a/bpkg/rep-info.cxx
+++ b/bpkg/rep-info.cxx
@@ -5,8 +5,7 @@
#include <iostream> // cout
-#include <libbutl/sha256.mxx> // sha256_to_fingerprint()
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-serializer.hxx>
#include <libbpkg/manifest.hxx>
@@ -49,12 +48,33 @@ namespace bpkg
// unknown manifest entries unless we are dumping them.
//
dir_path d (o.directory ());
+
+ const dir_path* conf (o.directory_specified () && d.empty ()
+ ? nullptr
+ : &d);
+
+ // If --directory|-d is not specified and the current working directory is
+ // a configuration directory, then initialize the temporary directory
+ // inside it, so that we can always move a version control-based
+ // repository into and out of it (see pkg_checkout() for details).
+ //
+ if (conf != nullptr && conf->empty ())
+ conf = exists (bpkg_dir) ? &current_dir : nullptr;
+
+ assert (conf == nullptr || !conf->empty ());
+
+ init_tmp (conf != nullptr ? *conf : empty_dir_path);
+
+ bool ignore_unknown (!o.manifest () || o.ignore_unknown ());
+
rep_fetch_data rfd (
rep_fetch (o,
- o.directory_specified () && d.empty () ? nullptr : &d,
+ conf,
rl,
- !o.manifest () /* ignore_unknow */,
- o.deep () /* expand_values */));
+ ignore_unknown,
+ ignore_unknown /* ignore_toolchain */,
+ o.deep () /* expand_values */,
+ o.deep () /* load_buildfiles */));
// Now print.
//
@@ -328,7 +348,6 @@ namespace bpkg
}
else
serialize (cout, "stdout");
-
}
else
{
diff --git a/bpkg/rep-list.cxx b/bpkg/rep-list.cxx
index 5b961c0..67b25bf 100644
--- a/bpkg/rep-list.cxx
+++ b/bpkg/rep-list.cxx
@@ -107,7 +107,7 @@ namespace bpkg
fail << "unexpected argument '" << args.next () << "'" <<
info << "run 'bpkg help rep-list' for more information";
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
transaction t (db);
session s; // Repository dependencies can have cycles.
diff --git a/bpkg/rep-mask.cxx b/bpkg/rep-mask.cxx
new file mode 100644
index 0000000..d7f9c6a
--- /dev/null
+++ b/bpkg/rep-mask.cxx
@@ -0,0 +1,368 @@
+// file : bpkg/rep-mask.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/rep-mask.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+#include <bpkg/package-query.hxx> // repo_configs
+#include <bpkg/manifest-utility.hxx> // repository_name()
+
+using namespace std;
+using namespace butl;
+
+namespace bpkg
+{
+ static optional<database_map<strings>> unmasked_repositories;
+ static optional<database_map<strings>> unmasked_repository_fragments;
+
+ // Note: defined in rep-remove.cxx.
+ //
+ void
+ rep_remove (database&,
+ transaction&,
+ const shared_ptr<repository>&,
+ bool mask);
+
+ // The overall plan is as follows:
+ //
+ // - Start the transaction.
+ //
+ // - Remove all the specified repositories recursively in all the
+ // configurations specified by repo_configs (repos) and/or configurations
+ // specified explicitly via UUIDs (config_uuid_repos).
+ //
+ // - Collect the remaining repositories and repository fragments as unmasked.
+ //
+ // - Rollback the transaction.
+ //
+ // Later on, the rep_masked*() functions will refer to the configuration-
+ // specific unmasked repositories and repository fragments lists to decide
+ // if the repository/fragment is masked or not in the specific configuration.
+ //
+ void
+ rep_mask (const strings& repos,
+ const strings& config_uuid_repos,
+ linked_databases& current_configs)
+ {
+ tracer trace ("rep_mask");
+
+ assert (!repo_configs.empty ());
+
+ database& mdb (repo_configs.front ());
+ tracer_guard tg (mdb, trace);
+
+ // Temporarily "suspend" the session before modifying the database.
+ //
+ session* sess (session::current_pointer ());
+ if (sess != nullptr)
+ session::reset_current ();
+
+ vector<lazy_weak_ptr<repository>> rs;
+ vector<bool> found_repos (repos.size(), false);
+
+ transaction t (mdb);
+
+ // Add a repository from a database, suppressing duplicates.
+ //
+ auto add_repo = [&rs] (database& db, shared_ptr<repository>&& r)
+ {
+ if (find_if (rs.begin (), rs.end (),
+ [&db, &r] (const lazy_weak_ptr<repository>& lr)
+ {
+ return lr.database () == db && lr.object_id () == r->name;
+ }) == rs.end ())
+ rs.emplace_back (db, move (r));
+ };
+
+ // Collect the repositories masked in all configurations.
+ //
+ for (database& db: repo_configs)
+ {
+ for (size_t i (0); i != repos.size (); ++i)
+ {
+ // Add a repository, suppressing duplicates, and mark it as found.
+ //
+ auto add = [&db,
+ &found_repos, i,
+ &add_repo] (shared_ptr<repository>&& r)
+ {
+ add_repo (db, move (r));
+ found_repos[i] = true;
+ };
+
+ const string& rp (repos[i]);
+
+ if (repository_name (rp))
+ {
+ if (shared_ptr<repository> r = db.find<repository> (rp))
+ add (move (r));
+ }
+ else
+ {
+ using query = query<repository>;
+
+ // Verify that the repository URL is not misspelled or empty.
+ //
+ try
+ {
+ repository_url u (rp);
+ assert (!u.empty ());
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "repository '" << rp << "' cannot be masked: "
+ << "invalid repository location: " << e;
+ }
+
+ for (shared_ptr<repository> r:
+ pointer_result (
+ db.query<repository> (query::location.url == rp)))
+ add (move (r));
+ }
+ }
+ }
+
+ // Fail if any of the specified repositories is not found in any database.
+ //
+ for (size_t i (0); i != repos.size (); ++i)
+ {
+ if (!found_repos[i])
+ fail << "repository '" << repos[i] << "' cannot be masked: not found";
+ }
+
+ // Collect the repositories masked in specific configurations.
+ //
+ for (const string& cr: config_uuid_repos)
+ {
+ auto bad = [&cr] (const string& d)
+ {
+ fail << "configuration repository '" << cr << "' cannot be masked: "
+ << d;
+ };
+
+ size_t p (cr.find ('='));
+
+ if (p == string::npos)
+ bad ("missing '='");
+
+ uuid uid;
+ string uid_str (cr, 0, p);
+
+ try
+ {
+ uid = uuid (uid_str);
+ }
+ catch (const invalid_argument& e)
+ {
+ bad ("invalid configuration uuid '" + uid_str + "': " + e.what ());
+ }
+
+ database* db (nullptr);
+
+ for (database& cdb: current_configs)
+ {
+ if ((db = cdb.try_find_dependency_config (uid)) != nullptr)
+ break;
+ }
+
+ if (db == nullptr)
+ bad ("no configuration with uuid " + uid.string () +
+ " is linked with " +
+ (current_configs.size () == 1
+ ? mdb.config_orig.representation ()
+ : "specified current configurations"));
+
+ string rp (cr, p + 1);
+
+ if (repository_name (rp))
+ {
+ if (shared_ptr<repository> r = db->find<repository> (rp))
+ add_repo (*db, move (r));
+ else
+ bad ("repository name '" + rp + "' not found in configuration " +
+ uid.string ());
+ }
+ else
+ {
+ using query = query<repository>;
+
+ // Verify that the repository URL is not misspelled or empty.
+ //
+ try
+ {
+ repository_url u (rp);
+ assert (!u.empty ());
+ }
+ catch (const invalid_argument& e)
+ {
+ bad ("invalid repository location '" + rp + "': " + e.what ());
+ }
+
+ bool found (false);
+ for (shared_ptr<repository> r:
+ pointer_result (
+ db->query<repository> (query::location.url == rp)))
+ {
+ add_repo (*db, move (r));
+ found = true;
+ }
+
+ if (!found)
+ bad ("repository location '" + rp + "' not found in configuration " +
+ uid.string ());
+ }
+ }
+
+ // First, remove the repository references from the dependent repository
+ // fragments. Note that rep_remove() removes the dangling repositories.
+ //
+ // Note that for efficiency we un-reference all the repositories before
+ // starting to delete them.
+ //
+ for (const lazy_weak_ptr<repository>& r: rs)
+ {
+ database& db (r.database ());
+ const string& nm (r.object_id ());
+
+ // Remove from complements of the dependents.
+ //
+ for (const auto& rf: db.query<repository_complement_dependent> (
+ query<repository_complement_dependent>::complement::name == nm))
+ {
+ const shared_ptr<repository_fragment>& f (rf);
+ repository_fragment::dependencies& cs (f->complements);
+
+ auto i (cs.find (r));
+ assert (i != cs.end ());
+
+ cs.erase (i);
+ db.update (f);
+ }
+
+ // Remove from prerequisites of the dependents.
+ //
+ for (const auto& rf:
+ db.query<repository_prerequisite_dependent> (
+ query<repository_prerequisite_dependent>::prerequisite::name ==
+ nm))
+ {
+ const shared_ptr<repository_fragment>& f (rf);
+ repository_fragment::dependencies& ps (f->prerequisites);
+
+ auto i (ps.find (r));
+ assert (i != ps.end ());
+
+ ps.erase (i);
+ db.update (f);
+ }
+ }
+
+ // Remove the now dangling repositories.
+ //
+ for (const lazy_weak_ptr<repository>& r: rs)
+ rep_remove (r.database (), t, r.load (), true /* mask */);
+
+ // Collect the repositories and fragments which have remained after the
+ // removal.
+ //
+ unmasked_repositories = database_map<strings> ();
+ unmasked_repository_fragments = database_map<strings> ();
+
+ for (database& db: repo_configs)
+ {
+ // Add the repository location canonical name to the database-specific
+ // unmasked repositories or repository fragments lists. Note that
+ // repository location is used only for tracing.
+ //
+ auto add = [&db, &trace] (string&& n,
+ database_map<strings>& m,
+ const repository_location& loc,
+ const char* what)
+ {
+ auto i (m.find (db));
+ if (i == m.end ())
+ i = m.insert (db, strings ()).first;
+
+ l4 ([&]{trace << "unmasked " << what << ": '" << n
+ << "' '" << loc.url () << "'" << db;});
+
+ i->second.push_back (move (n));
+ };
+
+ for (shared_ptr<repository> r: pointer_result (db.query<repository> ()))
+ add (move (r->name),
+ *unmasked_repositories,
+ r->location,
+ "repository");
+
+ for (shared_ptr<repository_fragment> f:
+ pointer_result (db.query<repository_fragment> ()))
+ add (move (f->name),
+ *unmasked_repository_fragments,
+ f->location,
+ "repository fragment");
+ }
+
+ // Rollback the transaction and restore the session, if present.
+ //
+ t.rollback ();
+
+ if (sess != nullptr)
+ session::current_pointer (sess);
+ }
+
+ static inline bool
+ masked (database& db,
+ const string& name,
+ const optional<database_map<strings>>& m)
+ {
+ if (!m)
+ return false;
+
+ auto i (m->find (db));
+ if (i != m->end ())
+ {
+ const strings& ns (i->second);
+ return find (ns.begin (), ns.end (), name) == ns.end ();
+ }
+
+ return true;
+ }
+
+ bool
+ rep_masked (database& db, const shared_ptr<repository>& r)
+ {
+ return masked (db, r->name, unmasked_repositories);
+ }
+
+ bool
+ rep_masked (const lazy_weak_ptr<repository>& r)
+ {
+ // Should not be transient.
+ //
+ assert (!(r.lock ().get_eager () != nullptr && !r.loaded ()));
+
+ return masked (r.database (), r.object_id (), unmasked_repositories);
+ }
+
+ bool
+ rep_masked_fragment (database& db, const shared_ptr<repository_fragment>& f)
+ {
+ return masked (db, f->name, unmasked_repository_fragments);
+ }
+
+ bool
+ rep_masked_fragment (const lazy_shared_ptr<repository_fragment>& f)
+ {
+ // Should not be transient.
+ //
+ assert (!(f.get_eager () != nullptr && !f.loaded ()));
+
+ return masked (f.database (),
+ f.object_id (),
+ unmasked_repository_fragments);
+ }
+}
diff --git a/bpkg/rep-mask.hxx b/bpkg/rep-mask.hxx
new file mode 100644
index 0000000..2185af2
--- /dev/null
+++ b/bpkg/rep-mask.hxx
@@ -0,0 +1,73 @@
+// file : bpkg/rep-mask.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_REP_MASK_HXX
+#define BPKG_REP_MASK_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/forward.hxx> // database, repository
+#include <bpkg/utility.hxx>
+
+namespace bpkg
+{
+ // Note: not a command (at least not yet).
+ //
+ // Mask repositories to pretend they don't exist in the configurations that
+ // are used as the repository information sources (repo_configs;
+ // repositories argument) and/or specific configurations
+ // (config_uuid_repositories argument). Also mask their complement and
+ // prerequisite repositories, recursively, excluding those which are
+ // complements and/or prerequisites of other unmasked repositories. The
+ // repositories can be specified either as repository location canonical
+ // names or URLs. Issue diagnostics and fail if any of the specified
+ // repositories don't exist in any configuration.
+ //
+ // Notes:
+ //
+ // - The current configurations are only used to resolve the configuration
+ // UUIDs, if any.
+ //
+ // - A repository may end up being masked in one configuration but not in
+ // another.
+ //
+ // - Using a canonical name potentially masks repositories with different
+ // URLs in different configurations (think of local and remote pkg
+ // repository locations).
+ //
+ // - Using a URL potentially masks repositories with different canonical
+ // names in the same configuration (think of directory and local git
+ // repository locations).
+ //
+ // NOTE: repo_configs needs to be filled prior to the function call.
+ //
+ void
+ rep_mask (const strings& repositories, // <rep>
+ const strings& config_uuid_repositories, // <config-uuid>=<rep>
+ linked_databases& current_configs);
+
+ // Return true if a repository is masked in the specified configuration.
+ //
+ bool
+ rep_masked (database&, const shared_ptr<repository>&);
+
+ // Note: the argument must refer to a persistent object which incorporates
+ // the configuration information (database).
+ //
+ bool
+ rep_masked (const lazy_weak_ptr<repository>&);
+
+ // Return true if a repository fragment in the specified configuration
+ // belongs to the masked repositories only and is therefore masked (see
+ // package.hxx for the fragment/repository relationship details).
+ //
+ bool
+ rep_masked_fragment (database&, const shared_ptr<repository_fragment>&);
+
+ // Note: the argument must refer to a persistent object which incorporates
+ // the configuration information (database).
+ //
+ bool
+ rep_masked_fragment (const lazy_shared_ptr<repository_fragment>&);
+}
+
+#endif // BPKG_REP_MASK_HXX
diff --git a/bpkg/rep-remove.cxx b/bpkg/rep-remove.cxx
index c377fc5..ad10f56 100644
--- a/bpkg/rep-remove.cxx
+++ b/bpkg/rep-remove.cxx
@@ -5,7 +5,7 @@
#include <set>
-#include <libbutl/filesystem.mxx> // dir_iterator
+#include <libbutl/filesystem.hxx> // dir_iterator
#include <bpkg/package.hxx>
#include <bpkg/package-odb.hxx>
@@ -25,8 +25,7 @@ namespace bpkg
// prerequisites. Thus we need to make sure that the repository was not
// traversed yet.
//
- using repositories = set<reference_wrapper<const shared_ptr<repository>>,
- compare_reference_target>;
+ using repositories = set<shared_ptr<repository>>;
static bool
reachable (database& db,
@@ -94,11 +93,12 @@ namespace bpkg
}
void
- rep_remove_package_locations (transaction& t, const string& fragment_name)
+ rep_remove_package_locations (database& db,
+ transaction&,
+ const string& fragment_name)
{
tracer trace ("rep_remove_package_locations");
- database& db (t.database ());
tracer_guard tg (db, trace);
using query = query<repository_fragment_package>;
@@ -129,9 +129,12 @@ namespace bpkg
// the chances for the operation to succeed.
//
static void
- rmdir (const dir_path& d)
+ rmdir (const dir_path& cfg, const dir_path& d)
{
- dir_path td (temp_dir / d.leaf ());
+ auto i (tmp_dirs.find (cfg));
+ assert (i != tmp_dirs.end ());
+
+ dir_path td (i->second / d.leaf ());
if (exists (td))
rm_r (td);
@@ -140,16 +143,25 @@ namespace bpkg
rm_r (td, true /* dir_itself */, 3, rm_error_mode::warn);
}
+ static void
+ rep_remove_fragment (database&,
+ transaction&,
+ const shared_ptr<repository_fragment>&,
+ bool mask);
+
+ // In the mask repositories mode don't clean up the repository state in the
+ // filesystem (see rep-mask.hxx for the details on repository masking).
+ //
void
- rep_remove (const dir_path& c,
+ rep_remove (database& db,
transaction& t,
- const shared_ptr<repository>& r)
+ const shared_ptr<repository>& r,
+ bool mask)
{
assert (!r->name.empty ()); // Can't be the root repository.
tracer trace ("rep_remove");
- database& db (t.database ());
tracer_guard tg (db, trace);
if (reachable (db, r))
@@ -164,7 +176,7 @@ namespace bpkg
// Remove dangling repository fragments.
//
for (const repository::fragment_type& fr: r->fragments)
- rep_remove_fragment (c, t, fr.fragment.load ());
+ rep_remove_fragment (db, t, fr.fragment.load (), mask);
// If there are no repositories stayed in the database then no repository
// fragments should stay either.
@@ -172,8 +184,8 @@ namespace bpkg
if (db.query_value<repository_count> () == 0)
assert (db.query_value<repository_fragment_count> () == 0);
- // Cleanup the repository state if present and there are no more
- // repositories referring this state.
+ // Unless in the mask repositories mode, clean up the repository state if
+ // present and there are no more repositories referring to this state.
//
// Note that this step is irreversible on failure. If something goes wrong
// we will end up with a state-less fetched repository and the
@@ -184,48 +196,61 @@ namespace bpkg
// then remove them after committing the transaction. Though, we still may
// fail in the middle due to the filesystem error.
//
- dir_path d (repository_state (r->location));
-
- if (!d.empty ())
+ if (!mask)
{
- dir_path sd (c / repos_dir / d);
+ dir_path d (repository_state (r->location));
- if (exists (sd))
+ if (!d.empty ())
{
- // There is no way to get the list of repositories that share this
- // state other than traversing all repositories of this type.
- //
- bool rm (true);
-
- using query = query<repository>;
+ dir_path sd (db.config_orig / repos_dir / d);
- for (shared_ptr<repository> rp:
- pointer_result (
- db.query<repository> (
- query::name != "" &&
- query::location.type == to_string (r->location.type ()))))
+ if (exists (sd))
{
- if (repository_state (rp->location) == d)
+ // There is no way to get the list of repositories that share this
+ // state other than traversing all repositories of this type.
+ //
+ bool rm (true);
+
+ using query = query<repository>;
+
+ for (shared_ptr<repository> rp:
+ pointer_result (
+ db.query<repository> (
+ query::name != "" &&
+ query::location.type == to_string (r->location.type ()))))
{
- rm = false;
- break;
+ if (repository_state (rp->location) == d)
+ {
+ rm = false;
+ break;
+ }
}
- }
- if (rm)
- rmdir (sd);
+ if (rm)
+ rmdir (db.config_orig, sd);
+ }
}
}
}
void
- rep_remove_fragment (const dir_path& c,
+ rep_remove (database& db, transaction& t, const shared_ptr<repository>& r)
+ {
+ rep_remove (db, t, r, false /* mask */);
+ }
+
+ // In the mask repositories mode don't remove the repository fragment from
+ // locations of the available packages it contains (see rep-mask.hxx for the
+ // details on repository masking).
+ //
+ static void
+ rep_remove_fragment (database& db,
transaction& t,
- const shared_ptr<repository_fragment>& rf)
+ const shared_ptr<repository_fragment>& rf,
+ bool mask)
{
tracer trace ("rep_remove_fragment");
- database& db (t.database ());
tracer_guard tg (db, trace);
// Bail out if the repository fragment is still used.
@@ -236,11 +261,12 @@ namespace bpkg
"fragment=" + query::_val (rf->name)) != 0)
return;
- // Remove the repository fragment from locations of the available packages
- // it contains. Note that this must be done before the repository fragment
- // removal.
+ // Unless in the mask repositories mode, remove the repository fragment
+ // from locations of the available packages it contains. Note that this
+ // must be done before the repository fragment removal.
//
- rep_remove_package_locations (t, rf->name);
+ if (!mask)
+ rep_remove_package_locations (db, t, rf->name);
// Remove the repository fragment.
//
@@ -256,8 +282,8 @@ namespace bpkg
//
if (db.query_value<repository_fragment_count> () == 0)
{
- assert (db.query_value<repository_count> () == 0);
- assert (db.query_value<available_package_count> () == 0);
+ assert (db.query_value<repository_count> () == 0);
+ assert (mask || db.query_value<available_package_count> () == 0);
}
// Remove dangling complements and prerequisites.
@@ -265,10 +291,10 @@ namespace bpkg
// Prior to removing a prerequisite/complement we need to make sure it
// still exists, which may not be the case due to the dependency cycle.
//
- auto remove = [&c, &db, &t] (const lazy_weak_ptr<repository>& rp)
+ auto remove = [&db, &t, mask] (const lazy_weak_ptr<repository>& rp)
{
if (shared_ptr<repository> r = db.find<repository> (rp.object_id ()))
- rep_remove (c, t, r);
+ rep_remove (db, t, r, mask);
};
for (const lazy_weak_ptr<repository>& cr: rf->complements)
@@ -285,10 +311,15 @@ namespace bpkg
}
void
- rep_remove_clean (const common_options& o,
- const dir_path& c,
- database& db,
- bool quiet)
+ rep_remove_fragment (database& db,
+ transaction& t,
+ const shared_ptr<repository_fragment>& rf)
+ {
+ return rep_remove_fragment (db, t, rf, false /* mask */);
+ }
+
+ void
+ rep_remove_clean (const common_options& o, database& db, bool quiet)
{
tracer trace ("rep_remove_clean");
tracer_guard tg (db, trace);
@@ -336,14 +367,14 @@ namespace bpkg
// Remove repository state subdirectories.
//
- dir_path rd (c / repos_dir);
+ dir_path rd (db.config_orig / repos_dir);
try
{
- for (const dir_entry& de: dir_iterator (rd, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (rd, dir_iterator::no_follow))
{
if (de.ltype () == entry_type::directory)
- rmdir (rd / path_cast<dir_path> (de.path ()));
+ rmdir (db.config_orig, rd / path_cast<dir_path> (de.path ()));
}
}
catch (const system_error& e)
@@ -384,13 +415,13 @@ namespace bpkg
dr << info << "run 'bpkg help rep-remove' for more information";
}
- database db (open (c, trace));
+ database db (c, trace, false /* pre_attach */);
// Clean the configuration if requested.
//
if (o.clean ())
{
- rep_remove_clean (o, c, db, false /* quiet */);
+ rep_remove_clean (o, db, false /* quiet */);
return 0;
}
@@ -484,7 +515,7 @@ namespace bpkg
//
for (const lazy_shared_ptr<repository>& r: repos)
{
- rep_remove (c, t, r.load ());
+ rep_remove (db, t, r.load ());
if (verb && !o.no_result ())
text << "removed " << r.object_id ();
diff --git a/bpkg/rep-remove.hxx b/bpkg/rep-remove.hxx
index f85aec5..0fc82e8 100644
--- a/bpkg/rep-remove.hxx
+++ b/bpkg/rep-remove.hxx
@@ -5,7 +5,7 @@
#define BPKG_REP_REMOVE_HXX
#include <bpkg/types.hxx>
-#include <bpkg/forward.hxx> // database, transaction, repository
+#include <bpkg/forward.hxx> // transaction, repository
#include <bpkg/utility.hxx>
#include <bpkg/rep-remove-options.hxx>
@@ -20,15 +20,13 @@ namespace bpkg
// repository fragments.
//
void
- rep_remove (const dir_path& conf,
- transaction&,
- const shared_ptr<repository>&);
+ rep_remove (database&, transaction&, const shared_ptr<repository>&);
// Remove a repository fragment if it is not referenced by any repository,
// also removing its unreachable complements and prerequisites.
//
void
- rep_remove_fragment (const dir_path& conf,
+ rep_remove_fragment (database&,
transaction&,
const shared_ptr<repository_fragment>&);
@@ -50,16 +48,15 @@ namespace bpkg
// - Remove all available packages.
//
void
- rep_remove_clean (const common_options&,
- const dir_path& conf,
- database&,
- bool quiet = true);
+ rep_remove_clean (const common_options&, database&, bool quiet = true);
// Remove a repository fragment from locations of the available packages it
// contains. Remove packages that come from only this repository fragment.
//
void
- rep_remove_package_locations (transaction&, const string& fragment_name);
+ rep_remove_package_locations (database&,
+ transaction&,
+ const string& fragment_name);
}
#endif // BPKG_REP_REMOVE_HXX
diff --git a/bpkg/repository-signing.cli b/bpkg/repository-signing.cli
index 1796497..656599d 100644
--- a/bpkg/repository-signing.cli
+++ b/bpkg/repository-signing.cli
@@ -133,7 +133,8 @@ Add the \cb{certificate:} field for the base repository (\cb{role: base})
in the \cb{repositories} manifest file(s):
\
-certificate: \
+certificate:
+\\
<cert>
\\
\
@@ -143,7 +144,8 @@ Replace \i{cert} with the entire contents of \cb{cert.pem} (including the
an entry like this:
\
-certificate: \
+certificate:
+\\
-----BEGIN CERTIFICATE-----
MIIDQjCCAiqgAwIBAgIJAIUgsIqSnesGMA0GCSqGSIb3DQEBCwUAMDkxFzAVBgNV
.
@@ -193,11 +195,13 @@ just \cb{--key} as at step 4 (\c{\"SIGN key\"} is the label for the slot
\c{9c} private key):
\
-bpkg rep-create \
- --openssl-option rsautl:-engine --openssl-option rsautl:pkcs11 \
- --openssl-option rsautl:-keyform --openssl-option rsautl:engine \
+bpkg rep-create \
+ --openssl-option pkeyutl:-engine --openssl-option pkeyutl:pkcs11 \
+ --openssl-option pkeyutl:-keyform --openssl-option pkeyutl:engine \
--key \"pkcs11:object=SIGN%20key\" /path/to/repository
\
+Note that for \cb{openssl} versions prior to \cb{3.0.0} \cb{bpkg} uses the
+\cb{rsautl} command instead of \cb{pkeyutl} for the data signing operation.
||
"
diff --git a/bpkg/repository-types.cli b/bpkg/repository-types.cli
index 21ddaf9..1692a13 100644
--- a/bpkg/repository-types.cli
+++ b/bpkg/repository-types.cli
@@ -189,7 +189,9 @@ that they support fetching minimal history for tags and branches and may or
may not support this for commit ids depending on the server configuration.
Note, however, that unlike for \cb{http(s)://}, for these protocols \cb{bpkg}
does not try to sense if fetching unadvertised commits is allowed and always
-assumes that it is not.
+assumes that it is not. Also note that the sensed or assumed protocol
+capabilities can be overridden for a \cb{git} repository URL prefix using the
+\cb{--git-capabilities} option (\l{bpkg-common-options(1)}).
Based on this information, to achieve optimal results the recommended protocol
for remote repositories is smart \cb{https://}. Additionally, if you are
diff --git a/bpkg/satisfaction.cxx b/bpkg/satisfaction.cxx
index 52def32..4229004 100644
--- a/bpkg/satisfaction.cxx
+++ b/bpkg/satisfaction.cxx
@@ -34,13 +34,19 @@ namespace bpkg
//
if (c.min_version)
{
- int i (ev.compare (*c.min_version, !c.min_version->revision));
+ int i (ev.compare (*c.min_version,
+ !c.min_version->revision,
+ true /* ignore_iteration */));
+
s = c.min_open ? i > 0 : i >= 0;
}
if (s && c.max_version)
{
- int i (ev.compare (*c.max_version, !c.max_version->revision));
+ int i (ev.compare (*c.max_version,
+ !c.max_version->revision,
+ true /* ignore_iteration */));
+
s = c.max_open ? i < 0 : i <= 0;
}
@@ -85,7 +91,10 @@ namespace bpkg
version lv (norm (*l.min_version, true /* min */, l.min_open));
version rv (norm (*r.min_version, true /* min */, r.min_open));
- int i (lv.compare (rv, false /* ignore_revision */));
+ int i (lv.compare (rv,
+ false /* ignore_revision */,
+ true /* ignore_iteration */));
+
if (l.min_open)
// Doesn't matter if r is min_open or not.
//
@@ -108,7 +117,10 @@ namespace bpkg
version lv (norm (*l.max_version, false /* min */, l.max_open));
version rv (norm (*r.max_version, false /* min */, r.max_open));
- int i (lv.compare (rv, false /* ignore_revision */));
+ int i (lv.compare (rv,
+ false /* ignore_revision */,
+ true /* ignore_iteration */));
+
if (l.max_open)
// Doesn't matter if r is max_open or not.
//
@@ -128,12 +140,10 @@ namespace bpkg
return s;
}
- static version build2_version;
+ version build2_version;
- void
- satisfy_build2 (const common_options& o,
- const package_name& pkg,
- const dependency& d)
+ bool
+ satisfy_build2 (const common_options& o, const dependency& d)
{
assert (d.name == "build2");
@@ -180,18 +190,13 @@ namespace bpkg
fail << "unable to determine build2 version of " << name_b (o);
}
- if (!satisfies (build2_version, d.constraint))
- fail << "unable to satisfy constraint (" << d << ") for package "
- << pkg <<
- info << "available build2 version is " << build2_version;
+ return satisfies (build2_version, d.constraint);
}
- static version bpkg_version;
+ version bpkg_version;
- void
- satisfy_bpkg (const common_options&,
- const package_name& pkg,
- const dependency& d)
+ bool
+ satisfy_bpkg (const common_options&, const dependency& d)
{
assert (d.name == "bpkg");
@@ -200,9 +205,6 @@ namespace bpkg
if (bpkg_version.empty ())
bpkg_version = version (BPKG_VERSION_STR);
- if (!satisfies (bpkg_version, d.constraint))
- fail << "unable to satisfy constraint (" << d << ") for package "
- << pkg <<
- info << "available bpkg version is " << bpkg_version;
+ return satisfies (bpkg_version, d.constraint);
}
}
diff --git a/bpkg/satisfaction.hxx b/bpkg/satisfaction.hxx
index 7046a92..174e375 100644
--- a/bpkg/satisfaction.hxx
+++ b/bpkg/satisfaction.hxx
@@ -12,8 +12,13 @@
namespace bpkg
{
- // Note: all of the following functions expect the package version
- // constraints to be complete.
+ // Notes:
+ //
+ // - All of the following functions expect the package version constraints
+ // to be complete.
+ //
+ // - The version iterations are ignored on version comparisons.
+ //
// Return true if version satisfies the constraint.
//
@@ -42,13 +47,15 @@ namespace bpkg
// Special build-time dependencies.
//
- void
- satisfy_build2 (const common_options&,
- const package_name&,
- const dependency&);
+ extern version build2_version; // Set on the first satisfy_build2() call.
- void
- satisfy_bpkg (const common_options&, const package_name&, const dependency&);
+ bool
+ satisfy_build2 (const common_options&, const dependency&);
+
+ extern version bpkg_version; // Set on the first satisfy_bpkg() call.
+
+ bool
+ satisfy_bpkg (const common_options&, const dependency&);
}
#endif // BPKG_SATISFACTION_HXX
diff --git a/bpkg/satisfaction.test.cxx b/bpkg/satisfaction.test.cxx
index 9caa716..8082678 100644
--- a/bpkg/satisfaction.test.cxx
+++ b/bpkg/satisfaction.test.cxx
@@ -3,6 +3,9 @@
#include <bpkg/satisfaction.hxx>
+#undef NDEBUG
+#include <cassert>
+
namespace bpkg
{
static int
diff --git a/bpkg/system-package-manager-archive.cxx b/bpkg/system-package-manager-archive.cxx
new file mode 100644
index 0000000..d46e6d6
--- /dev/null
+++ b/bpkg/system-package-manager-archive.cxx
@@ -0,0 +1,794 @@
+// file : bpkg/system-package-manager-archive.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager-archive.hxx>
+
+#include <map>
+
+#include <bpkg/diagnostics.hxx>
+
+#include <bpkg/pkg-bindist-options.hxx>
+
+using namespace butl;
+
+namespace bpkg
+{
+ system_package_manager_archive::
+ system_package_manager_archive (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ const pkg_bindist_options* options)
+ : system_package_manager (move (osr), h, "", progress), ops (options)
+ {
+ if (!a.empty ())
+ {
+ assert (ops != nullptr);
+
+ try
+ {
+ target = target_triplet (a);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid --architecture target triplet value '" << a << "': "
+ << e;
+ }
+
+ if (!ops->os_release_id_specified ())
+        fail << "--architecture requires explicit --os-release-id";
+
+ if (!ops->archive_install_root_specified () &&
+ !ops->archive_install_config ())
+        fail << "--architecture requires explicit --archive-install-root";
+ }
+ else
+ target = host;
+
+ arch = target.string (); // Set since queried (e.g., JSON value).
+ }
+
+ // env --chdir=<root> tar|zip ... <base>.<ext> <base>
+ //
+ // Return the archive file path.
+ //
+ static path
+ archive (const dir_path& root,
+ const string& base,
+ const string& e /* ext */)
+ {
+ // NOTE: similar code in build2 (libbuild2/dist/operation.cxx).
+
+ path an (base + '.' + e);
+ path ap (root / an);
+
+ // Use zip for .zip archives. Also recognize and handle a few well-known
+ // tar.xx cases (in case tar doesn't support -a or has other issues like
+ // MSYS). Everything else goes to tar in the auto-compress mode (-a).
+ //
+ // Note also that we pass the archive path as name (an) instead of path
+ // (ap) since we are running from the root directory (see below).
+ //
+ cstrings args;
+
+ // Separate compressor (gzip, xz, etc) state.
+ //
+ size_t i (0); // Command line start or 0 if not used.
+ auto_rmfile out_rm; // Output file cleanup (must come first).
+ auto_fd out_fd; // Output file.
+
+ if (e == "zip")
+ {
+ // On Windows we use libarchive's bsdtar (zip is an MSYS executable).
+ //
+ // While not explicitly stated, the compression-level option works
+ // for zip archives.
+ //
+#ifdef _WIN32
+ args = {"bsdtar",
+ "-a", // -a with the .zip extension seems to be the only way.
+ "--options=compression-level=9",
+ "-cf", an.string ().c_str (),
+ base.c_str (),
+ nullptr};
+#else
+ args = {"zip",
+ "-9",
+ "-rq", an.string ().c_str (),
+ base.c_str (),
+ nullptr};
+#endif
+ }
+ else
+ {
+ // On Windows we use libarchive's bsdtar with auto-compression (tar
+ // itself and quite a few compressors are MSYS executables).
+ //
+      // OpenBSD tar does not support --format but it appears ustar is the
+ // default (while this is not said explicitly in tar(1), it is said in
+ // pax(1) and confirmed on the mailing list). Nor does it support -a, at
+ // least as of 7.1 but we will let this play out naturally, in case this
+ // support gets added.
+ //
+ // Note also that in the future we may switch to libarchive in order to
+ // generate reproducible archives.
+ //
+ const char* l (nullptr); // Compression level (option).
+
+#ifdef _WIN32
+ args = {"bsdtar", "--format", "ustar"};
+
+ if (e == "tar.gz" || e == "tar.xz")
+ l = "--options=compression-level=9";
+#else
+ args = {"tar"
+#ifndef __OpenBSD__
+ , "--format", "ustar"
+#endif
+ };
+
+ // For gzip it's a good idea to use -9 by default. While for xz, -9 is
+ // not recommended as the default due to memory requirements, in our
+ // case (large binary archives on development machines), this is
+ // unlikely to be an issue.
+ //
+ // Note also that the compression level can be altered via the GZIP
+ // (GZIP_OPT also seems to work) and XZ_OPT environment variables,
+ // respectively.
+ //
+ const char* c (nullptr);
+
+ if (e == "tar.gz") { c = "gzip"; l = "-9"; }
+ else if (e == "tar.xz")
+ {
+ // At least as of Mac OS 13 and Xcode 15, there is no standalone xz
+        // utility but tar seems to be capable of producing .tar.xz.
+ //
+#ifdef __APPLE__
+ l = "--options=compression-level=9";
+#else
+ c = "xz"; l = "-9";
+#endif
+ }
+
+ if (c != nullptr)
+ {
+ args.push_back ("-cf");
+ args.push_back ("-");
+ args.push_back (base.c_str ());
+ args.push_back (nullptr); i = args.size ();
+ args.push_back (c);
+ if (l != nullptr)
+ args.push_back (l);
+ args.push_back (nullptr);
+ args.push_back (nullptr); // Pipe end.
+
+ try
+ {
+ out_fd = fdopen (ap,
+ fdopen_mode::out | fdopen_mode::binary |
+ fdopen_mode::truncate | fdopen_mode::create);
+ out_rm = auto_rmfile (ap);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to open " << ap << ": " << e;
+ }
+ }
+ else
+#endif
+ {
+ if (e != "tar")
+ {
+ args.push_back ("-a");
+ if (l != nullptr)
+ args.push_back (l);
+ }
+
+ args.push_back ("-cf");
+ args.push_back (an.string ().c_str ());
+ args.push_back (base.c_str ());
+ args.push_back (nullptr);
+ }
+ }
+
+ size_t what (0); // Failed program name index in args.
+ try
+ {
+ process_path app; // Archiver path.
+ process_path cpp; // Compressor path.
+
+ app = process::path_search (args[what = 0]);
+
+ if (i != 0)
+ cpp = process::path_search (args[what = i]);
+
+ // Change the archiver's working directory to root.
+ //
+ process_env ape (app, root);
+
+ // Note: print the command line unless quiet similar to other package
+ // manager implementations.
+ //
+ if (verb >= 1)
+ print_process (ape, args);
+
+ what = 0;
+ process apr (app,
+ args.data (), // No auto-pipe.
+ 0 /* stdin */,
+ (i != 0 ? -1 : 1) /* stdout */,
+ 2 /* stderr */,
+ ape.cwd->string ().c_str (),
+ ape.vars);
+
+ // Start the compressor if required.
+ //
+ process cpr;
+ if (i != 0)
+ {
+ what = i;
+ cpr = process (cpp,
+ args.data () + i,
+ apr.in_ofd.get () /* stdin */,
+ out_fd.get () /* stdout */,
+ 2 /* stderr */);
+
+ cpr.in_ofd.reset (); // Close the archiver's stdout on our side.
+ }
+
+ // Delay throwing until we diagnose both ends of the pipe.
+ //
+ bool fail (false);
+
+ what = 0;
+ if (!apr.wait ())
+ {
+ diag_record dr (error);
+ dr << args[0] << " exited with non-zero code";
+
+ if (verb == 0)
+ {
+ info << "command line: ";
+ print_process (dr, ape, args.data ());
+ }
+
+ fail = true;
+ }
+
+ if (i != 0)
+ {
+ what = i;
+ if (!cpr.wait ())
+ {
+ diag_record dr (error);
+ dr << args[i] << " exited with non-zero code";
+
+ if (verb == 0)
+ {
+ info << "command line: ";
+ print_process (dr, args.data () + i);
+ }
+
+ fail = true;
+ }
+ }
+
+ if (fail)
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[what] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ out_rm.cancel ();
+ return ap;
+ }
+
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REWORDED IN BPKG-PKG-BINDIST(1).
+ //
+ // The overall plan is to invoke the build system and install all the
+ // packages directly from their bpkg locations into the binary package
+ // directory as a chroot. Then tar/zip this directory to produce one or more
+ // binary package archives.
+ //
+ auto system_package_manager_archive::
+ generate (const packages& pkgs,
+ const packages& deps,
+ const strings& vars,
+ const dir_path& /*cfg_dir*/,
+ const package_manifest& pm,
+ const string& pt,
+ const small_vector<language, 1>& langs,
+ optional<bool> recursive_full,
+ bool first) -> binary_files
+ {
+ tracer trace ("system_package_manager_archive::generate");
+
+ assert (!langs.empty ()); // Should be effective.
+
+ // We require explicit output root.
+ //
+ if (!ops->output_root_specified ())
+ fail << "output root directory must be specified explicitly with "
+ << "--output-root|-o";
+
+ const dir_path& out (ops->output_root ()); // Cannot be empty.
+
+ const shared_ptr<selected_package>& sp (pkgs.front ().selected);
+ const package_name& pn (sp->name);
+ const version& pv (sp->version);
+
+ // Use version without iteration in paths, etc.
+ //
+ string pvs (pv.string (false /* ignore_revision */,
+ true /* ignore_iteration */));
+
+ bool lib (pt == "lib");
+ bool priv (ops->private_ ()); // Private installation.
+
+ // Return true if this package uses the specified language, only as
+ // interface language if intf_only is true.
+ //
+ auto lang = [&langs] (const char* n, bool intf_only = false) -> bool
+ {
+ return find_if (langs.begin (), langs.end (),
+ [n, intf_only] (const language& l)
+ {
+ return (!intf_only || !l.impl) && l.name == n;
+ }) != langs.end ();
+ };
+
+ bool lang_c (lang ("c"));
+ bool lang_cxx (lang ("c++"));
+ bool lang_cc (lang ("cc"));
+
+ if (verb >= 3)
+ {
+ auto print_status = [] (diag_record& dr, const selected_package& p)
+ {
+ dr << (p.substate == package_substate::system ? "sys:" : "")
+ << p.name << ' ' << p.version;
+ };
+
+ {
+ diag_record dr (trace);
+ dr << "package: " ;
+ print_status (dr, *sp);
+ }
+
+ for (const package& p: deps)
+ {
+ diag_record dr (trace);
+ dr << "dependency: ";
+ print_status (dr, *p.selected);
+ }
+ }
+
+ // Should we override config.install.* or just use whatever configured
+ // (sans the root)? While using whatever configure seemed like a good idea
+ // at first, it's also a good idea to have the ability to tweak the
+ // installation directory structure on the per-platform basis (like, say,
+ // lib/libexec split or pkgconfig/ location on FreeBSD; in a sense, the
+ // user may choose to install to /usr and it would be good if things ended
+ // up in the expected places -- this is still a @@ TODO).
+ //
+ // So unless instructed otherwise with --archive-install-config, we
+ // override every config.install.* variable in order not to pick anything
+ // configured. Note that we add some more in the command line below.
+ //
+ // We make use of the <project> substitution since in the recursive mode
+ // we may be installing multiple projects. Note that the <private>
+ // directory component is automatically removed if this functionality is
+ // not enabled.
+ //
+ bool ovr_install (!ops->archive_install_config ());
+
+ strings config;
+ {
+ const string& c (target.class_);
+
+ dir_path root;
+ if (ops->archive_install_root_specified ())
+ {
+ // If specified, we override it even with --archive-install-config.
+ //
+ root = ops->archive_install_root (); // Cannot be empty.
+ }
+ else if (ovr_install)
+ {
+ if (c == "windows")
+ {
+ // Using C:\<project>\ looks like the best we can do (if the
+ // installation is not relocatable, at least related packages will
+ // be grouped together).
+ //
+ root = dir_path ("C:\\" + pm.effective_project ().string ());
+ }
+ else
+ root = dir_path ("/usr/local");
+ }
+
+ auto add = [&config] (auto&& v)
+ {
+ config.push_back (string ("config.install.") + v);
+ };
+
+ if (!root.empty ())
+ add ("root='" + root.representation () + '\'');
+
+ if (ovr_install)
+ {
+ add ("data_root=root/");
+ add ("exec_root=root/");
+
+ add ("bin=exec_root/bin/");
+ add ("sbin=exec_root/sbin/");
+
+ add ("lib=exec_root/lib/<private>/");
+ add ("libexec=exec_root/libexec/<private>/<project>/");
+ add ("pkgconfig=lib/pkgconfig/");
+
+ add ("etc=data_root/etc/");
+ add ("include=data_root/include/<private>/");
+ add ("include_arch=include/");
+ add ("share=data_root/share/");
+ add ("data=share/<private>/<project>/");
+ add ("buildfile=share/build2/export/<project>/");
+
+ add ("doc=share/doc/<private>/<project>/");
+ add ("legal=doc/");
+ add ("man=share/man/");
+ add ("man1=man/man1/");
+ add ("man2=man/man2/");
+ add ("man3=man/man3/");
+ add ("man4=man/man4/");
+ add ("man5=man/man5/");
+ add ("man6=man/man6/");
+ add ("man7=man/man7/");
+ add ("man8=man/man8/");
+
+ add ("private=" + (priv ? pn.string () : "[null]"));
+
+ // If this is a C-based language, add rpath for private installation,
+ // unless targeting Windows.
+ //
+ if (priv && (lang_c || lang_cxx || lang_cc) && c != "windows")
+ {
+ dir_path l ((dir_path (root) /= "lib") /= pn.string ());
+ config.push_back ("config.bin.rpath='" + l.representation () + '\'');
+ }
+ }
+ }
+
+ // Add user-specified configuration variables last to allow them to
+ // override anything.
+ //
+ for (const string& v: vars)
+ config.push_back (v);
+
+ // Note that we can use weak install scope for the auto recursive mode
+ // since we know dependencies cannot be spread over multiple linked
+ // configurations.
+ //
+ string scope (!recursive_full || *recursive_full ? "project" : "weak");
+
+ // The plan is to create the archive directory (with the same name as the
+ // archive base; we call it "destination directory") inside the output
+ // directory and then tar/zip it up placing the resulting archives next to
+ // it.
+ //
+ // Let's require clean output directory to keep things simple.
+ //
+ // Also, by default, we are going to keep all the intermediate files on
+ // failure for troubleshooting.
+ //
+ if (first && exists (out) && !empty (out))
+ {
+ if (!ops->wipe_output ())
+ fail << "output root directory " << out << " is not empty" <<
+ info << "use --wipe-output to clean it up but be careful";
+
+ rm_r (out, false);
+ }
+
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REWORDED IN BPKG-PKG-BINDIST(1).
+ //
+ // Our archive directory/file base have the following form:
+ //
+ // <package>-<version>-<build_metadata>
+ //
+    // Where <build_metadata> in turn has the following form (unless overridden
+    // with --archive-build-meta):
+ //
+ // <cpu>-<os>[-<langrt>...]
+ //
+ // For example:
+ //
+ // hello-1.2.3-x86_64-windows10
+ // libhello-1.2.3-x86_64-windows10-msvc17.4
+ // libhello-1.2.3-x86_64-debian11-gcc12-rust1.62
+ //
+ bool md_s (ops->archive_build_meta_specified ());
+ const string& md (ops->archive_build_meta ());
+
+ bool md_f (false);
+ bool md_b (false);
+ if (md_s && !md.empty ())
+ {
+ md_f = md.front () == '+';
+ md_b = md.back () == '+';
+
+ if (md_f && md_b) // Note: covers just `+`.
+ fail << "invalid build metadata '" << md << "'";
+ }
+
+ vector<reference_wrapper<const pair<const string, string>>> langrt;
+ if (!md_s || md_f || md_b)
+ {
+ // First collect the interface languages and then add implementation.
+ // This way if different languages map to the same runtimes (e.g., C and
+ // C++ mapped to gcc12), then we will always prefer the interface
+ // version over the implementation (which could be different, for
+ // example, libstdc++6 vs libstdc++-12-dev; but it's not clear how this
+ // will be specified, won't they end up with different names as opposed
+ // to gcc6 and gcc12 -- still fuzzy/unclear).
+ //
+ // @@ We will need to split id and version to be able to pick the
+ // highest version.
+ //
+ // @@ Maybe we should just do "soft" version like in <distribution>?
+ //
+ // Note that we allow multiple values for the same language to support
+ // cases like --archive-lang cc=gcc12 --archive-lang cc=g++12. But
+ // we treat an empty value as a request to clear all the previous
+ // entries.
+ //
+
+ auto find = [] (const std::multimap<string, string>& m, const string& n)
+ {
+ auto p (m.equal_range (n));
+
+ if (p.first == p.second)
+ {
+ // If no mapping for c/c++, fallback to cc.
+ //
+ if (n == "c" || n == "c++")
+ p = m.equal_range ("cc");
+ }
+
+ return p;
+ };
+
+ // We don't want to clear entries specified with --*-lang with an empty
+ // value specified with --*-lang-impl.
+ //
+ size_t clear_limit (0);
+
+ auto add = [&langrt, &clear_limit] (const pair<const string, string>& p)
+ {
+ // Suppress duplicates.
+ //
+ if (!p.second.empty ())
+ {
+ if (find_if (langrt.begin (), langrt.end (),
+ [&p] (const pair<const string, string>& x)
+ {
+ // @@ TODO: keep highest version.
+ return p.second == x.second;
+ }) == langrt.end ())
+ {
+ langrt.push_back (p);
+ }
+ }
+ else if (clear_limit != langrt.size ())
+ {
+ for (auto i (langrt.begin () + clear_limit); i != langrt.end (); )
+ {
+ if (i->get ().first == p.first)
+ i = langrt.erase (i);
+ else
+ ++i;
+ }
+ }
+ };
+
+ auto& implm (ops->archive_lang_impl ());
+
+ // The interface/implementation distinction is only relevant to
+ // libraries. For everything else we treat all the languages as
+ // implementation.
+ //
+ if (lib)
+ {
+ auto& intfm (ops->archive_lang ());
+
+ for (const language& l: langs)
+ {
+ if (l.impl)
+ continue;
+
+ auto p (find (intfm, l.name));
+
+ if (p.first == p.second)
+ p = find (implm, l.name);
+
+ if (p.first == p.second)
+ fail << "no runtime mapping for language " << l.name <<
+ info << "consider specifying with --archive-lang[-impl]" <<
+ info << "or alternatively specify --archive-build-meta";
+
+ for (auto i (p.first); i != p.second; ++i)
+ add (*i);
+ }
+
+ clear_limit = langrt.size ();
+ }
+
+ for (const language& l: langs)
+ {
+ if (lib && !l.impl)
+ continue;
+
+ auto p (find (implm, l.name));
+
+ if (p.first == p.second)
+ continue; // Unimportant.
+
+ for (auto i (p.first); i != p.second; ++i)
+ add (*i);
+ }
+ }
+
+ // If there is no split, reduce to empty key and empty filter.
+ //
+ binary_files r;
+ for (const pair<const string, string>& kf:
+ ops->archive_split_specified ()
+ ? ops->archive_split ()
+ : std::map<string, string> {{string (), string ()}})
+ {
+ string sys_name (pn.string ());
+
+ if (!kf.first.empty ())
+ sys_name += '-' + kf.first;
+
+ string base (sys_name);
+
+ base += '-' + pvs;
+
+ if (md_s && !(md_f || md_b))
+ {
+ if (!md.empty ())
+ base += '-' + md;
+ }
+ else
+ {
+ if (md_b)
+ {
+ base += '-';
+ base.append (md, 0, md.size () - 1);
+ }
+
+ if (!ops->archive_no_cpu ())
+ base += '-' + target.cpu;
+
+ if (!ops->archive_no_os ())
+ base += '-' + os_release.name_id + os_release.version_id;
+
+ for (const pair<const string, string>& p: langrt)
+ base += '-' + p.second;
+
+ if (md_f)
+ {
+ base += '-';
+ base.append (md, 1, md.size () - 1);
+ }
+ }
+
+ dir_path dst (out / dir_path (base));
+ mk_p (dst);
+
+ // Install.
+ //
+ // In a sense, this is a special version of pkg-install.
+ //
+ {
+ strings dirs;
+ for (const package& p: pkgs)
+ dirs.push_back (p.out_root.representation ());
+
+ string filter;
+ if (!kf.second.empty ())
+ filter = "config.install.filter=" + kf.second;
+
+ run_b (*ops,
+ verb_b::normal,
+ (ops->jobs_specified ()
+ ? strings ({"--jobs", to_string (ops->jobs ())})
+ : strings ()),
+ "config.install.chroot='" + dst.representation () + '\'',
+ (ovr_install ? "config.install.sudo=[null]" : nullptr),
+ (!filter.empty () ? filter.c_str () : nullptr),
+ config,
+ "!config.install.scope=" + scope,
+ "install:",
+ dirs);
+
+ // @@ TODO: call install.json? Or manifest-install.json. Place in
+ // data/ (would need support in build2 to use install.* values)?
+ //
+#if 0
+ args.push_back ("!config.install.manifest=-");
+#endif
+ }
+
+ if (ops->archive_prepare_only ())
+ {
+ if (verb >= 1)
+ text << "prepared " << dst;
+
+ continue;
+ }
+
+ // Create the archive.
+ //
+ // Should the default archive type be based on host or target? I guess
+ // that depends on where the result will be unpacked, and it feels like
+ // target is more likely.
+ //
+      // @@ What about the ownership of the resulting file in the archive?
+ // We don't do anything for source archives, not sure why we should
+ // do something here.
+ //
+ for (string t: (ops->archive_type_specified ()
+ ? ops->archive_type ()
+ : strings {target.class_ == "windows" ? "zip" : "tar.xz"}))
+ {
+ // Help the user out if the extension is specified with the leading
+ // dot.
+ //
+ if (t.size () > 1 && t.front () == '.')
+ t.erase (0, 1);
+
+ path f (archive (out, base, t));
+
+ // Using archive type as file type seems appropriate. Add key before
+ // the archive type, if any.
+ //
+ if (!kf.first.empty ())
+ t = kf.first + '.' + t;
+
+ r.push_back (binary_file {move (t), move (f), sys_name});
+ }
+
+ // Cleanup intermediate files unless requested not to.
+ //
+ if (!ops->keep_output ())
+ {
+ rm_r (dst);
+ }
+ }
+
+ return r;
+ }
+
+ optional<const system_package_status*> system_package_manager_archive::
+ status (const package_name&, const available_packages*)
+ {
+ assert (false);
+ return nullopt;
+ }
+
+ void system_package_manager_archive::
+ install (const vector<package_name>&)
+ {
+ assert (false);
+ }
+}
diff --git a/bpkg/system-package-manager-archive.hxx b/bpkg/system-package-manager-archive.hxx
new file mode 100644
index 0000000..01c4a2a
--- /dev/null
+++ b/bpkg/system-package-manager-archive.hxx
@@ -0,0 +1,55 @@
+// file : bpkg/system-package-manager-archive.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_SYSTEM_PACKAGE_MANAGER_ARCHIVE_HXX
+#define BPKG_SYSTEM_PACKAGE_MANAGER_ARCHIVE_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/system-package-manager.hxx>
+
+namespace bpkg
+{
+ // The system package manager implementation for the installation archive
+ // packages, production only.
+ //
+ class system_package_manager_archive: public system_package_manager
+ {
+ public:
+ virtual binary_files
+ generate (const packages&,
+ const packages&,
+ const strings&,
+ const dir_path&,
+ const package_manifest&,
+ const string&,
+ const small_vector<language, 1>&,
+ optional<bool>,
+ bool) override;
+
+ virtual optional<const system_package_status*>
+ status (const package_name&, const available_packages*) override;
+
+ virtual void
+ install (const vector<package_name>&) override;
+
+ public:
+ // Note: options can only be NULL when testing functions that don't need
+ // them.
+ //
+ system_package_manager_archive (bpkg::os_release&&,
+ const target_triplet& host,
+ string arch,
+ optional<bool> progress,
+ const pkg_bindist_options*);
+
+ protected:
+ // Only for production.
+ //
+ const pkg_bindist_options* ops = nullptr;
+ target_triplet target;
+ };
+}
+
+#endif // BPKG_SYSTEM_PACKAGE_MANAGER_ARCHIVE_HXX
diff --git a/bpkg/system-package-manager-debian.cxx b/bpkg/system-package-manager-debian.cxx
new file mode 100644
index 0000000..8b27e37
--- /dev/null
+++ b/bpkg/system-package-manager-debian.cxx
@@ -0,0 +1,3616 @@
+// file : bpkg/system-package-manager-debian.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager-debian.hxx>
+
+#include <locale>
+
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx> // permissions
+
+#include <bpkg/diagnostics.hxx>
+
+#include <bpkg/pkg-bindist-options.hxx>
+
+using namespace butl;
+
+namespace bpkg
+{
+ using package_status = system_package_status_debian;
+
+ // Translate host CPU to Debian package architecture.
+ //
+ string system_package_manager_debian::
+ arch_from_target (const target_triplet& h)
+ {
+ const string& c (h.cpu);
+ return
+ c == "x86_64" ? "amd64" :
+ c == "aarch64" ? "arm64" :
+ c == "i386" || c == "i486" || c == "i586" || c == "i686" ? "i386" :
+ c;
+ }
+
+ // Parse the debian-name (or alike) value. The first argument is the package
+ // type.
+ //
+ // Note that for now we treat all the packages from the non-main groups as
+ // extras omitting the -common package (assuming it's pulled by the main
+ // package) as well as -doc and -dbg unless requested with the
+ // extra_{doc,dbg} arguments.
+ //
+ package_status system_package_manager_debian::
+ parse_name_value (const string& pt,
+ const string& nv,
+ bool extra_doc,
+ bool extra_dbg)
+ {
+ auto split = [] (const string& s, char d) -> strings
+ {
+ strings r;
+ for (size_t b (0), e (0); next_word (s, b, e, d); )
+ r.push_back (string (s, b, e - b));
+ return r;
+ };
+
+ auto suffix = [] (const string& n, const string& s) -> bool
+ {
+ size_t nn (n.size ());
+ size_t sn (s.size ());
+ return nn > sn && n.compare (nn - sn, sn, s) == 0;
+ };
+
+ auto parse_group = [&split, &suffix] (const string& g, const string* pt)
+ {
+ strings ns (split (g, ' '));
+
+ if (ns.empty ())
+ fail << "empty package group";
+
+ package_status r;
+
+ // Handle the "dev instead of main" special case for libraries.
+ //
+ // Check that the following name does not end with -dev. This will be
+ // the only way to disambiguate the case where the library name happens
+ // to end with -dev (e.g., libfoo-dev libfoo-dev-dev).
+ //
+ {
+ string& m (ns[0]);
+
+ if (pt != nullptr &&
+ *pt == "lib" &&
+ suffix (m, "-dev") &&
+ !(ns.size () > 1 && suffix (ns[1], "-dev")))
+ {
+ r = package_status ("", move (m));
+ }
+ else
+ r = package_status (move (m));
+ }
+
+ // Handle the rest.
+ //
+ for (size_t i (1); i != ns.size (); ++i)
+ {
+ string& n (ns[i]);
+
+ const char* w;
+ if (string* v = (suffix (n, (w = "-dev")) ? &r.dev :
+ suffix (n, (w = "-doc")) ? &r.doc :
+ suffix (n, (w = "-dbg")) ? &r.dbg :
+ suffix (n, (w = "-common")) ? &r.common : nullptr))
+ {
+ if (!v->empty ())
+ fail << "multiple " << w << " package names in '" << g << "'" <<
+ info << "did you forget to separate package groups with comma?";
+
+ *v = move (n);
+ }
+ else
+ r.extras.push_back (move (n));
+ }
+
+ return r;
+ };
+
+ strings gs (split (nv, ','));
+ assert (!gs.empty ()); // *-name value cannot be empty.
+
+ package_status r;
+ for (size_t i (0); i != gs.size (); ++i)
+ {
+ if (i == 0) // Main group.
+ r = parse_group (gs[i], &pt);
+ else
+ {
+ package_status g (parse_group (gs[i], nullptr));
+
+ if (!g.main.empty ()) r.extras.push_back (move (g.main));
+ if (!g.dev.empty ()) r.extras.push_back (move (g.dev));
+ if (!g.doc.empty () && extra_doc) r.extras.push_back (move (g.doc));
+ if (!g.dbg.empty () && extra_dbg) r.extras.push_back (move (g.dbg));
+ if (!g.common.empty () && false) r.extras.push_back (move (g.common));
+ if (!g.extras.empty ()) r.extras.insert (
+ r.extras.end (),
+ make_move_iterator (g.extras.begin ()),
+ make_move_iterator (g.extras.end ()));
+ }
+ }
+
+ return r;
+ }
+
+ // Attempt to determine the main package name from its -dev package based on
+ // the extracted Depends value. Return empty string if unable to.
+ //
+ string system_package_manager_debian::
+ main_from_dev (const string& dev_name,
+ const string& dev_ver,
+ const string& depends)
+ {
+    // The format of the Depends value is a comma-separated list of dependency
+ // expressions. For example:
+ //
+ // Depends: libssl3 (= 3.0.7-1), libc6 (>= 2.34), libfoo | libbar
+ //
+ // For the main package we look for a dependency in the form:
+ //
+ // <dev-stem>* (= <dev-ver>)
+ //
+ // Usually it is the first one.
+ //
+ string dev_stem (dev_name, 0, dev_name.rfind ("-dev"));
+
+ string r;
+ for (size_t b (0), e (0); next_word (depends, b, e, ','); )
+ {
+ string d (depends, b, e - b);
+ trim (d);
+
+ size_t p (d.find (' '));
+ if (p != string::npos)
+ {
+ if (d.compare (0, dev_stem.size (), dev_stem) == 0) // <dev-stem>*
+ {
+ size_t q (d.find ('(', p + 1));
+ if (q != string::npos && d.back () == ')') // (...)
+ {
+ if (d[q + 1] == '=' && d[q + 2] == ' ') // Equal.
+ {
+ string v (d, q + 3, d.size () - q - 3 - 1);
+ trim (v);
+
+ if (v == dev_ver)
+ {
+ r.assign (d, 0, p);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+ }
+
+ // Do we use apt or apt-get? From apt(8):
+ //
+ // "The apt(8) commandline is designed as an end-user tool and it may change
+ // behavior between versions. [...]
+ //
+ // All features of apt(8) are available in dedicated APT tools like
+ // apt-get(8) and apt-cache(8) as well. [...] So you should prefer using
+ // these commands (potentially with some additional options enabled) in
+ // your scripts as they keep backward compatibility as much as possible."
+ //
+ // Note also that for some reason both apt-cache and apt-get exit with 100
+ // code on error.
+ //
+ static process_path apt_cache_path;
+ static process_path apt_get_path;
+ static process_path sudo_path;
+
+ // Obtain the installed and candidate versions for the specified list of
+ // Debian packages by executing `apt-cache policy`.
+ //
+ // If the n argument is not 0, then only query the first n packages.
+ //
+ void system_package_manager_debian::
+ apt_cache_policy (vector<package_policy>& pps, size_t n)
+ {
+ if (n == 0)
+ n = pps.size ();
+
+ assert (n != 0 && n <= pps.size ());
+
+    // The --quiet option makes sure we don't get a notice (N) printed to
+ // stderr if the package is unknown. It does not appear to affect error
+ // diagnostics (try temporarily renaming /var/lib/dpkg/status).
+ //
+ cstrings args {"apt-cache", "policy", "--quiet"};
+
+ for (size_t i (0); i != n; ++i)
+ {
+ package_policy& pp (pps[i]);
+
+ const string& n (pp.name);
+ assert (!n.empty ());
+
+ pp.installed_version.clear ();
+ pp.candidate_version.clear ();
+
+ args.push_back (n.c_str ());
+ }
+
+ args.push_back (nullptr);
+
+ // Run with the C locale to make sure there is no localization. Note that
+ // this is not without potential drawbacks, see Debian bug #643787. But
+ // for now it seems to work and feels like the least of two potential
+ // evils.
+ //
+ const char* evars[] = {"LC_ALL=C", nullptr};
+
+ try
+ {
+ if (apt_cache_path.empty () && !simulate_)
+ apt_cache_path = process::path_search (args[0], false /* init */);
+
+ process_env pe (apt_cache_path, evars);
+
+ if (verb >= 3)
+ print_process (pe, args);
+
+ // Redirect stdout to a pipe. For good measure also redirect stdin to
+ // /dev/null to make sure there are no prompts of any kind.
+ //
+ process pr;
+ if (!simulate_)
+ pr = process (apt_cache_path,
+ args,
+ -2 /* stdin */,
+ -1 /* stdout */,
+ 2 /* stderr */,
+ nullptr /* cwd */,
+ evars);
+ else
+ {
+ strings k;
+ for (size_t i (0); i != n; ++i)
+ k.push_back (pps[i].name);
+
+ const path* f (nullptr);
+ if (installed_)
+ {
+ auto i (simulate_->apt_cache_policy_installed_.find (k));
+ if (i != simulate_->apt_cache_policy_installed_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr && fetched_)
+ {
+ auto i (simulate_->apt_cache_policy_fetched_.find (k));
+ if (i != simulate_->apt_cache_policy_fetched_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr)
+ {
+ auto i (simulate_->apt_cache_policy_.find (k));
+ if (i != simulate_->apt_cache_policy_.end ())
+ f = &i->second;
+ }
+
+ diag_record dr (text);
+ print_process (dr, pe, args);
+ dr << " <" << (f == nullptr || f->empty () ? "/dev/null" : f->string ());
+
+ pr = process (process_exit (0));
+ pr.in_ofd = f == nullptr || f->empty ()
+ ? fdopen_null ()
+ : (f->string () == "-"
+ ? fddup (stdin_fd ())
+ : fdopen (*f, fdopen_mode::in));
+ }
+
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+
+ // The output of `apt-cache policy <pkg1> <pkg2> ...` are blocks of
+ // lines in the following form:
+ //
+ // <pkg1>:
+ // Installed: 1.2.3-1
+ // Candidate: 1.3.0-2
+ // Version table:
+ // <...>
+ // <pkg2>:
+ // Installed: (none)
+ // Candidate: 1.3.0+dfsg-2+b1
+ // Version table:
+ // <...>
+ //
+ // Where <...> are further lines indented with at least one space. If
+ // a package is unknown, then the entire block (including the first
+ // <pkg>: line) is omitted. The blocks appear in the same order as
+ // packages on the command line and multiple entries for the same
+ // package result in multiple corresponding blocks. It looks like
+        // there should be no blank lines but who really knows.
+ //
+ // Note also that if Installed version is not (none), then the
+        // Candidate version will be that version or better.
+ //
+ {
+ auto df = make_diag_frame (
+ [&pe, &args] (diag_record& dr)
+ {
+ dr << info << "while parsing output of ";
+ print_process (dr, pe, args);
+ });
+
+ size_t i (0);
+
+ string l;
+ for (getline (is, l); !eof (is); )
+ {
+ // Parse the first line of the block.
+ //
+ if (l.empty () || l.front () == ' ' || l.back () != ':')
+ fail << "expected package name instead of '" << l << "'";
+
+ l.pop_back ();
+
+ // Skip until this package.
+ //
+ for (; i != n && pps[i].name != l; ++i) ;
+
+ if (i == n)
+ fail << "unexpected package name '" << l << "'";
+
+ package_policy& pp (pps[i]);
+
+ auto parse_version = [&l] (const string& n) -> string
+ {
+ size_t s (n.size ());
+
+ if (l[0] == ' ' &&
+ l[1] == ' ' &&
+ l.compare (2, s, n) == 0 &&
+ l[2 + s] == ':')
+ {
+ string v (l, 2 + s + 1);
+ trim (v);
+
+ if (!v.empty ())
+ return v == "(none)" ? string () : move (v);
+ }
+
+ fail << "invalid " << n << " version line '" << l << "'" << endf;
+ };
+
+ // Get the installed version line.
+ //
+ if (eof (getline (is, l)))
+ fail << "expected Installed version line after package name";
+
+ pp.installed_version = parse_version ("Installed");
+
+ // Get the candidate version line.
+ //
+ if (eof (getline (is, l)))
+ fail << "expected Candidate version line after Installed version";
+
+ pp.candidate_version = parse_version ("Candidate");
+
+ // Candidate should fallback to Installed.
+ //
+ assert (pp.installed_version.empty () ||
+ !pp.candidate_version.empty ());
+
+ // Skip the rest of the indented lines (or blanks, just in case).
+ //
+ while (!eof (getline (is, l)) && (l.empty () || l.front () == ' ')) ;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ fail << "unable to read " << args[0] << " policy output: " << e;
+
+ // Fall through.
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << args[0] << " policy exited with non-zero code";
+
+ if (verb < 3)
+ {
+ dr << info << "command line: ";
+ print_process (dr, pe, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ // Execute `apt-cache show` and return the Depends value, if any, for the
+ // specified package and version. Fail if either package or version is
+ // unknown. Return the empty string if the package has no Depends field.
+ //
+ string system_package_manager_debian::
+ apt_cache_show (const string& name, const string& ver)
+ {
+ assert (!name.empty () && !ver.empty ());
+
+ // The <pkg>=<ver> spec understood by apt-cache.
+ //
+ string spec (name + '=' + ver);
+
+ // In particular, --quiet makes sure we don't get notices (N) printed to
+ // stderr. It does not appear to affect error diagnostics (try showing
+ // information for an unknown package).
+ //
+ const char* args[] = {
+ "apt-cache", "show", "--quiet", spec.c_str (), nullptr};
+
+ // Note that for this command there seems to be no need to run with the C
+ // locale since the output is presumably not localizable. But let's do it
+ // for good measure and also seeing that we try to backfit some
+ // diagnostics into apt-cache (see no_version below).
+ //
+ const char* evars[] = {"LC_ALL=C", nullptr};
+
+ // The resulting Depends value (stays empty if none is present).
+ //
+ string r;
+ try
+ {
+ if (apt_cache_path.empty () && !simulate_)
+ apt_cache_path = process::path_search (args[0], false /* init */);
+
+ process_env pe (apt_cache_path, evars);
+
+ if (verb >= 3)
+ print_process (pe, args);
+
+ // Redirect stdout to a pipe. For good measure also redirect stdin to
+ // /dev/null to make sure there are no prompts of any kind.
+ //
+ process pr;
+ if (!simulate_)
+ pr = process (apt_cache_path,
+ args,
+ -2 /* stdin */,
+ -1 /* stdout */,
+ 2 /* stderr */,
+ nullptr /* cwd */,
+ evars);
+ else
+ {
+ pair<string, string> k (name, ver);
+
+ const path* f (nullptr);
+ if (fetched_)
+ {
+ auto i (simulate_->apt_cache_show_fetched_.find (k));
+ if (i != simulate_->apt_cache_show_fetched_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr)
+ {
+ auto i (simulate_->apt_cache_show_.find (k));
+ if (i != simulate_->apt_cache_show_.end ())
+ f = &i->second;
+ }
+
+ diag_record dr (text);
+ print_process (dr, pe, args);
+ dr << " <" << (f == nullptr || f->empty () ? "/dev/null" : f->string ());
+
+ if (f == nullptr || f->empty ())
+ {
+ text << "E: No packages found";
+ pr = process (process_exit (100));
+ }
+ else
+ {
+ pr = process (process_exit (0));
+ pr.in_ofd = f->string () == "-"
+ ? fddup (stdin_fd ())
+ : fdopen (*f, fdopen_mode::in);
+ }
+ }
+
+ // Set if the output is empty, which indicates that the version (as
+ // opposed to the package) is unknown (see below for details).
+ //
+ bool no_version (false);
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+
+ // The output of `apt-cache show <pkg>=<ver>` appears to be a single
+ // Debian control file in the RFC 822 encoding followed by a blank
+ // line. See deb822(5) for details. Here is a representative example:
+ //
+ // Package: libcurl4
+ // Version: 7.85.0-1
+ // Depends: libbrotli1 (>= 0.6.0), libc6 (>= 2.34), ...
+ // Description-en: easy-to-use client-side URL transfer library
+ // libcurl is an easy-to-use client-side URL transfer library.
+ //
+ // Note that if the package is unknown, then we get an error but if
+ // the version is unknown, we get no output (and a note if running
+ // without --quiet).
+ //
+ string l;
+ if (eof (getline (is, l)))
+ {
+ // The unknown version case. Issue diagnostics consistent with the
+ // unknown package case, at least for the English locale.
+ //
+ text << "E: No package version found";
+ no_version = true;
+ }
+ else
+ {
+ auto df = make_diag_frame (
+ [&pe, &args] (diag_record& dr)
+ {
+ dr << info << "while parsing output of ";
+ print_process (dr, pe, args);
+ });
+
+ do
+ {
+ // This line should be the start of a field unless it's a comment
+ // or the terminating blank line. According to deb822(5), there
+ // can be no leading whitespaces before `#`.
+ //
+ if (l.empty ())
+ break;
+
+ if (l[0] == '#')
+ {
+ getline (is, l);
+ continue;
+ }
+
+ size_t p (l.find (':'));
+
+ if (p == string::npos)
+ fail << "expected field name instead of '" << l << "'";
+
+ // Extract the field name. Note that field names are case-
+ // insensitive.
+ //
+ string n (l, 0, p);
+ trim (n);
+
+ // Extract the field value.
+ //
+ string v (l, p + 1);
+ trim (v);
+
+ // If we have more lines see if the following line is part of this
+ // value.
+ //
+ while (!eof (getline (is, l)) && (l[0] == ' ' || l[0] == '\t'))
+ {
+ // This can either be a "folded" or a "multiline" field and
+ // which one it is depends on the field semantics. Here we only
+ // care about Depends and so treat them all as folded (it's
+ // unclear whether Depends must be a simple field).
+ //
+ trim (l);
+ v += ' ';
+ v += l;
+ }
+
+ // See if this is a field of interest.
+ //
+ if (icasecmp (n, "Package") == 0)
+ {
+ assert (v == name); // Sanity check.
+ }
+ else if (icasecmp (n, "Version") == 0)
+ {
+ assert (v == ver); // Sanity check.
+ }
+ else if (icasecmp (n, "Depends") == 0)
+ {
+ r = move (v);
+
+ // Let's not waste time reading any further.
+ //
+ break;
+ }
+ }
+ while (!eof (is));
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ fail << "unable to read " << args[0] << " show output: " << e;
+
+ // Fall through.
+ }
+
+ // Note that in the no_version case the process has presumably exited
+ // with zero code but we still fail with the "exited with non-zero code"
+ // diagnostics for consistency with the unknown package case (see the
+ // backfit discussion above).
+ //
+ if (!pr.wait () || no_version)
+ {
+ diag_record dr (fail);
+ dr << args[0] << " show exited with non-zero code";
+
+ if (verb < 3)
+ {
+ dr << info << "command line: ";
+ print_process (dr, pe, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ return r;
+ }
+
+ // Prepare the common `apt-get <command>` options. Return the argument
+ // list (without the terminating NULL, which the caller is expected to
+ // add) and the process path of the program to execute (sudo, if
+ // configured, or apt-get otherwise).
+ //
+ pair<cstrings, const process_path&> system_package_manager_debian::
+ apt_get_common (const char* command, strings& args_storage)
+ {
+ // Pre-allocate the required number of entries in the arguments storage
+ // (the returned args vector holds c_str() pointers into this storage so
+ // it must not reallocate).
+ //
+ if (fetch_timeout_)
+ args_storage.reserve (1);
+
+ cstrings args;
+
+ if (!sudo_.empty ())
+ args.push_back (sudo_.c_str ());
+
+ args.push_back ("apt-get");
+ args.push_back (command);
+
+ // Map our verbosity/progress to apt-get --quiet[=<level>]. The levels
+ // appear to have the following behavior:
+ //
+ // 1 -- shows URL being downloaded but no percentage progress is shown.
+ //
+ // 2 -- only shows diagnostics (implies --assume-yes which cannot be
+ // overridden with --assume-no).
+ //
+ // It also appears to automatically use level 1 if stderr is not a
+ // terminal. This can be overridden with --quiet=0.
+ //
+ // Note also that --show-progress does not apply to apt-get update. For
+ // apt-get install it shows additionally progress during unpacking which
+ // looks quite odd.
+ //
+ if (progress_ && *progress_)
+ {
+ args.push_back ("--quiet=0");
+ }
+ else if (verb == 0)
+ {
+ // Only use level 2 if assuming yes.
+ //
+ args.push_back (yes_ ? "--quiet=2" : "--quiet");
+ }
+ else if (progress_ && !*progress_)
+ {
+ args.push_back ("--quiet");
+ }
+
+ if (yes_)
+ {
+ args.push_back ("--assume-yes");
+ }
+ else if (!stderr_term)
+ {
+ // Suppress any prompts if stderr is not a terminal for good measure.
+ //
+ args.push_back ("--assume-no");
+ }
+
+ // Add the network operations timeout options, if requested.
+ //
+ if (fetch_timeout_)
+ {
+ args.push_back ("-o");
+
+ args_storage.push_back (
+ "Acquire::http::Timeout=" + to_string (*fetch_timeout_));
+
+ args.push_back (args_storage.back ().c_str ());
+ }
+
+ try
+ {
+ // Lazily search for the program to execute (sudo or apt-get), caching
+ // the result in the corresponding member.
+ //
+ const process_path* pp (nullptr);
+
+ if (!sudo_.empty ())
+ {
+ if (sudo_path.empty () && !simulate_)
+ sudo_path = process::path_search (args[0], false /* init */);
+
+ pp = &sudo_path;
+ }
+ else
+ {
+ if (apt_get_path.empty () && !simulate_)
+ apt_get_path = process::path_search (args[0], false /* init */);
+
+ pp = &apt_get_path;
+ }
+
+ return pair<cstrings, const process_path&> (move (args), *pp);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute " << args[0] << ": " << e << endf;
+ }
+ }
+
+ // Execute `apt-get update` to update the package index.
+ //
+ void system_package_manager_debian::
+ apt_get_update ()
+ {
+   // Assemble the common command line (the storage must outlive the
+   // argument vector which points into it).
+   //
+   strings store;
+   pair<cstrings, const process_path&> cmd (apt_get_common ("update", store));
+
+   cstrings& args (cmd.first);
+   const process_path& pp (cmd.second);
+
+   args.push_back (nullptr);
+
+   try
+   {
+     if (verb == 1)
+       text << "updating " << os_release.name_id << " package index...";
+     else if (verb >= 2)
+       print_process (args);
+
+     process proc;
+     if (simulate_)
+     {
+       print_process (args);
+       proc = process (
+         process_exit (simulate_->apt_get_update_fail_ ? 100 : 0));
+     }
+     else
+     {
+       // Redirect stdout to stderr since apt-get prints some of its
+       // diagnostics to stdout.
+       //
+       proc = process (pp, args, 0 /* stdin */, 2 /* stdout */);
+     }
+
+     if (!proc.wait ())
+     {
+       diag_record dr (fail);
+       dr << "apt-get update exited with non-zero code";
+
+       if (verb < 2)
+       {
+         dr << info << "command line: ";
+         print_process (dr, args);
+       }
+     }
+
+     if (verb == 1)
+       text << "updated " << os_release.name_id << " package index";
+   }
+   catch (const process_error& e)
+   {
+     error << "unable to execute " << args[0] << ": " << e;
+
+     if (e.child)
+       exit (1);
+
+     throw failed ();
+   }
+ }
+
+ // Execute `apt-get install` to install the specified packages/versions
+ // (e.g., libfoo or libfoo=1.2.3).
+ //
+ void system_package_manager_debian::
+ apt_get_install (const strings& pkgs)
+ {
+   assert (!pkgs.empty ());
+
+   // Assemble the common command line (the storage must outlive the
+   // argument vector which points into it).
+   //
+   strings store;
+   pair<cstrings, const process_path&> cmd (apt_get_common ("install", store));
+
+   cstrings& args (cmd.first);
+   const process_path& pp (cmd.second);
+
+   for (const string& p: pkgs)
+     args.push_back (p.c_str ());
+
+   args.push_back (nullptr);
+
+   try
+   {
+     if (verb == 1)
+       text << "installing " << os_release.name_id << " packages...";
+     else if (verb >= 2)
+       print_process (args);
+
+     process proc;
+     if (simulate_)
+     {
+       print_process (args);
+       proc = process (
+         process_exit (simulate_->apt_get_install_fail_ ? 100 : 0));
+     }
+     else
+     {
+       // Redirect stdout to stderr since apt-get prints some of its
+       // diagnostics to stdout.
+       //
+       proc = process (pp, args, 0 /* stdin */, 2 /* stdout */);
+     }
+
+     if (!proc.wait ())
+     {
+       diag_record dr (fail);
+       dr << "apt-get install exited with non-zero code";
+
+       if (verb < 2)
+       {
+         dr << info << "command line: ";
+         print_process (dr, args);
+       }
+
+       dr << info << "consider resolving the issue manually and retrying "
+          << "the bpkg command";
+     }
+
+     if (verb == 1)
+       text << "installed " << os_release.name_id << " packages";
+   }
+   catch (const process_error& e)
+   {
+     error << "unable to execute " << args[0] << ": " << e;
+
+     if (e.child)
+       exit (1);
+
+     throw failed ();
+   }
+ }
+
+ optional<const system_package_status*> system_package_manager_debian::
+ status (const package_name& pn, const available_packages* aps)
+ {
+   // Caching entry point: a present cache entry maps to either a computed
+   // status (pointer to it) or a negative result (nullptr).
+   //
+   auto i (status_cache_.find (pn));
+
+   if (i == status_cache_.end ())
+   {
+     // Without the available packages we cannot compute the status, so
+     // just report that it's not cached.
+     //
+     if (aps == nullptr)
+       return nullopt;
+
+     // Compute the status and cache the result.
+     //
+     i = status_cache_.emplace (pn, status (pn, *aps)).first;
+   }
+
+   return i->second ? &*i->second : nullptr;
+ }
+
+ // Determine the status of this package by mapping its name to one or
+ // more candidate sets of Debian packages and querying `apt-cache policy`
+ // (and, if installation is allowed, `apt-get update` and `apt-cache
+ // show`). Return nullopt if no suitable candidate is found and fail if
+ // the result is ambiguous (multiple installed/available candidates).
+ //
+ optional<package_status> system_package_manager_debian::
+ status (const package_name& pn, const available_packages& aps)
+ {
+ tracer trace ("system_package_manager_debian::status");
+
+ // For now we ignore -doc and -dbg package components (but we may want to
+ // have options controlling this later). Note also that we assume -common
+ // is pulled automatically by the main package so we ignore it as well
+ // (see equivalent logic in parse_name_value()).
+ //
+ bool need_doc (false);
+ bool need_dbg (false);
+
+ vector<package_status> candidates;
+
+ // Translate our package name to the Debian package names.
+ //
+ {
+ auto df = make_diag_frame (
+ [this, &pn] (diag_record& dr)
+ {
+ dr << info << "while mapping " << pn << " to "
+ << os_release.name_id << " package name";
+ });
+
+ // Without explicit type, the best we can do in trying to detect whether
+ // this is a library is to check for the lib prefix. Libraries without
+ // the lib prefix and non-libraries with the lib prefix (both of which
+ // we do not recommend) will have to provide a manual mapping (or
+ // explicit type).
+ //
+ // Note that using the first (latest) available package as a source of
+ // type information seems like a reasonable choice.
+ //
+ const string& pt (!aps.empty ()
+ ? aps.front ().first->effective_type ()
+ : package_manifest::effective_type (nullopt, pn));
+
+ strings ns;
+ if (!aps.empty ())
+ ns = system_package_names (aps,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids,
+ true /* native */);
+ if (ns.empty ())
+ {
+ // Attempt to automatically translate our package name (see above for
+ // details).
+ //
+ const string& n (pn.string ());
+
+ if (pt == "lib")
+ {
+ // Keep the main package name empty as an indication that it is to
+ // be discovered.
+ //
+ // @@ It seems that quite often the header-only library -dev package
+ // name doesn't start with 'lib'. Here are some randomly chosen
+ // packages: libeigen3-dev, libmdds-dev, rapidjson-dev, etl-dev,
+ // seqan-dev, catch2. Should we implement the fallback similar to
+ // the Fedora implementation? Maybe one day.
+ //
+ candidates.push_back (package_status ("", n + "-dev"));
+ }
+ else
+ candidates.push_back (package_status (n));
+ }
+ else
+ {
+ // Parse each manual mapping.
+ //
+ for (const string& n: ns)
+ {
+ package_status s (parse_name_value (pt, n, need_doc, need_dbg));
+
+ // Suppress duplicates for good measure based on the main package
+ // name (and falling back to -dev if empty).
+ //
+ auto i (find_if (candidates.begin (), candidates.end (),
+ [&s] (const package_status& x)
+ {
+ // Note that it's possible for one mapping to be
+ // specified as -dev only while the other as main
+ // and -dev.
+ //
+ return s.main.empty () || x.main.empty ()
+ ? s.dev == x.dev
+ : s.main == x.main;
+ }));
+ if (i == candidates.end ())
+ candidates.push_back (move (s));
+ else
+ {
+ // Should we verify the rest matches for good measure? But what if
+ // we need to override, as in:
+ //
+ // debian_10-name: libcurl4 libcurl4-openssl-dev
+ // debian_9-name: libcurl4 libcurl4-dev
+ //
+ // Note that for this to work we must get debian_10 values before
+ // debian_9, which is the semantics guaranteed by
+ // system_package_names().
+ }
+ }
+ }
+ }
+
+ // Guess unknown main package given the -dev package and its version.
+ // Failing that, assume the package to be a binless library and leave the
+ // main member of the package_status object empty.
+ //
+ auto guess_main = [this, &trace] (package_status& s, const string& ver)
+ {
+ string depends (apt_cache_show (s.dev, ver));
+
+ s.main = main_from_dev (s.dev, ver, depends);
+
+ if (s.main.empty ())
+ {
+ l4 ([&]{trace << "unable to guess main package for " << s.dev << ' '
+ << ver << ", Depends value: " << depends;});
+ }
+ };
+
+ // Calculate the package status from individual package components.
+ // Return nullopt if there is a component without installed or candidate
+ // version (which means the package cannot be installed).
+ //
+ // The main argument specifies the size of the main group. Only components
+ // from this group are considered for partially_installed determination.
+ //
+ // @@ TODO: we should probably prioritize partially installed with fully
+ // installed main group. Add almost_installed next to partially_installed?
+ //
+ using status_type = package_status::status_type;
+
+ auto status = [] (const vector<package_policy>& pps, size_t main)
+ -> optional<status_type>
+ {
+ bool i (false), u (false);
+
+ for (size_t j (0); j != pps.size (); ++j)
+ {
+ const package_policy& pp (pps[j]);
+
+ if (pp.installed_version.empty ())
+ {
+ if (pp.candidate_version.empty ())
+ return nullopt;
+
+ u = true;
+ }
+ else if (j < main)
+ i = true;
+ }
+
+ return (!u ? package_status::installed :
+ !i ? package_status::not_installed :
+ package_status::partially_installed);
+ };
+
+ // First look for an already fully installed package.
+ //
+ optional<package_status> r;
+
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ vector<package_policy>& pps (ps.package_policies);
+
+ if (!ps.main.empty ()) pps.emplace_back (ps.main);
+ if (!ps.dev.empty ()) pps.emplace_back (ps.dev);
+ if (!ps.doc.empty () && need_doc) pps.emplace_back (ps.doc);
+ if (!ps.dbg.empty () && need_dbg) pps.emplace_back (ps.dbg);
+ // Note: the -common component is deliberately never queried (it is
+ // assumed to be pulled by the main package; see above), hence the
+ // disabling `&& false`.
+ //
+ if (!ps.common.empty () && false) pps.emplace_back (ps.common);
+ ps.package_policies_main = pps.size ();
+ for (const string& n: ps.extras) pps.emplace_back (n);
+
+ apt_cache_policy (pps);
+
+ // Handle the unknown main package.
+ //
+ if (ps.main.empty ())
+ {
+ const package_policy& dev (pps.front ());
+
+ // Note that at this stage we can only use the installed -dev
+ // package (since the candidate version may change after fetch).
+ //
+ if (dev.installed_version.empty ())
+ continue;
+
+ guess_main (ps, dev.installed_version);
+
+ if (!ps.main.empty ()) // Not a binless library?
+ {
+ pps.emplace (pps.begin (), ps.main);
+ ps.package_policies_main++;
+ apt_cache_policy (pps, 1);
+ }
+ }
+
+ optional<status_type> s (status (pps, ps.package_policies_main));
+
+ if (!s || *s != package_status::installed)
+ continue;
+
+ const package_policy& main (pps.front ()); // Main/dev.
+
+ ps.status = *s;
+ ps.system_name = main.name;
+ ps.system_version = main.installed_version;
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple installed " << os_release.name_id
+ << " packages for " << pn <<
+ info << "candidate: " << r->system_name << ' ' << r->system_version;
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version;
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider specifying the desired version manually";
+ }
+
+ // Next look for available versions if we are allowed to install. Indicate
+ // the non-installable candidates by setting both their main and -dev
+ // package names to empty strings.
+ //
+ if (!r && install_)
+ {
+ // If we weren't instructed to fetch or we already fetched, then we
+ // don't need to re-run apt_cache_policy().
+ //
+ bool requery;
+ if ((requery = fetch_ && !fetched_))
+ {
+ apt_get_update ();
+ fetched_ = true;
+ }
+
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ vector<package_policy>& pps (ps.package_policies);
+
+ if (requery)
+ apt_cache_policy (pps);
+
+ // Handle the unknown main package.
+ //
+ if (ps.main.empty ())
+ {
+ const package_policy& dev (pps.front ());
+
+ // Note that this time we use the candidate version.
+ //
+ if (dev.candidate_version.empty ())
+ {
+ // Not installable.
+ //
+ ps.dev.clear ();
+ continue;
+ }
+
+ guess_main (ps, dev.candidate_version);
+
+ if (!ps.main.empty ()) // Not a binless library?
+ {
+ pps.emplace (pps.begin (), ps.main);
+ ps.package_policies_main++;
+ apt_cache_policy (pps, 1);
+ }
+ }
+
+ optional<status_type> s (status (pps, ps.package_policies_main));
+
+ if (!s)
+ {
+ // Not installable.
+ //
+ ps.main.clear ();
+ ps.dev.clear ();
+ continue;
+ }
+
+ assert (*s != package_status::installed); // Sanity check.
+
+ const package_policy& main (pps.front ()); // Main/dev.
+
+ // Note that if we are installing something for this main package,
+ // then we always go for the candidate version even though it may
+ // have an installed version that may be good enough (especially if
+ // what we are installing are extras). The reason is that it may as
+ // well not be good enough (especially if we are installing the -dev
+ // package) and there is no straightforward way to change our mind.
+ //
+ ps.status = *s;
+ ps.system_name = main.name;
+ ps.system_version = main.candidate_version;
+
+ // Prefer partially installed to not installed. This makes detecting
+ // ambiguity a bit trickier so we handle partially installed here
+ // and not installed in a separate loop below.
+ //
+ if (ps.status != package_status::partially_installed)
+ continue;
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ auto print_missing = [&dr] (const package_status& s)
+ {
+ for (const package_policy& pp: s.package_policies)
+ if (pp.installed_version.empty ())
+ dr << ' ' << pp.name;
+ };
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple partially installed "
+ << os_release.name_id << " packages for " << pn;
+
+ dr << info << "candidate: " << r->system_name << ' '
+ << r->system_version << ", missing components:";
+ print_missing (*r);
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version << ", missing components:";
+ print_missing (ps);
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider fully installing the desired package "
+ << "manually and retrying the bpkg command";
+ }
+
+ if (!r)
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ if (ps.main.empty () && ps.dev.empty ()) // Not installable?
+ continue;
+
+ assert (ps.status == package_status::not_installed); // Sanity check.
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple available " << os_release.name_id
+ << " packages for " << pn <<
+ info << "candidate: " << r->system_name << ' '
+ << r->system_version;
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version;
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider installing the desired package manually and "
+ << "retrying the bpkg command";
+ }
+ }
+
+ if (r)
+ {
+ // Map the Debian version to the bpkg version. But first strip the
+ // revision from Debian version ([<epoch>:]<upstream>[-<revision>]), if
+ // any.
+ //
+ // Note that according to deb-version(5), <upstream> may contain `:`/`-`
+ // but in these cases <epoch>/<revision> must be specified explicitly,
+ // respectively.
+ //
+ string sv (r->system_version, 0, r->system_version.rfind ('-'));
+
+ // Debian package versions sometimes include metadata introduced with
+ // the `+` character that established relationships between Debian and
+ // upstream packages, backports, and some other murky stuff (see Debian
+ // Policy with a strong cup of coffee for details). Normally such
+ // metadata is included in the revision, as in, for example,
+ // 1.4.0-1+deb10u1. However, sometimes you see it included in the
+ // version directly, as in, for example: 3.2.4+debian-1 (see Debian bug
+ // 542288 for a potential explanation; might need to refill your cup).
+ //
+ // Since our upstream version may not contain `+`, it feels reasonable
+ // to remove such metadata. What's less clear is whether we should do
+ // the same before trying the to-downstream mapping. In fact, it's
+ // possible `+` belongs to the upstream version rather than Debian
+ // metadata or that there are both (there is also no guarantee that
+ // Debian metadata may not contain multiple `+`). So what we are going
+ // to do is try the mapping with more and more `+` components stripped
+ // (naturally ending up with all of them stripped for the fallback
+ // below).
+ //
+ optional<version> v;
+ for (size_t p (sv.size ()); p != string::npos; p = sv.rfind ('+'))
+ {
+ sv.resize (p);
+
+ if (!aps.empty ())
+ {
+ v = downstream_package_version (sv,
+ aps,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids);
+ if (v)
+ break;
+ }
+ }
+
+ if (!v)
+ {
+ // Fallback to using system version as downstream version. But first
+ // strip the epoch, if any. Also convert the potential pre-release
+ // separator to the bpkg version pre-release separator.
+ //
+ size_t p (sv.find (':'));
+ if (p != string::npos)
+ sv.erase (0, p + 1);
+
+ // Consider the first '~' character as a pre-release separator. Note
+ // that if there are more of them, then we will fail since '~' is an
+ // invalid character for bpkg version.
+ //
+ p = sv.find ('~');
+ if (p != string::npos)
+ sv[p] = '-';
+
+ try
+ {
+ v = version (sv);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "unable to map " << os_release.name_id << " package "
+ << r->system_name << " version " << sv << " to bpkg package "
+ << pn << " version" <<
+ info << os_release.name_id << " version is not a valid bpkg "
+ << "version: " << e.what () <<
+ info << "consider specifying explicit mapping in " << pn
+ << " package manifest";
+ }
+ }
+
+ r->version = move (*v);
+ }
+
+ return r;
+ }
+
+ // Install the specified packages which must all have been previously
+ // queried with status(). This passes all their Debian package components
+ // (including the already fully installed ones, so that they are marked
+ // as manually installed) to `apt-get install` and then verifies that the
+ // versions promised by status() have actually ended up installed.
+ //
+ void system_package_manager_debian::
+ install (const vector<package_name>& pns)
+ {
+ assert (!pns.empty ());
+
+ assert (install_ && !installed_);
+ installed_ = true;
+
+ // Collect and merge all the Debian packages/versions for the specified
+ // bpkg packages.
+ //
+ struct package
+ {
+ string name;
+ string version; // Empty if unspecified.
+ };
+ vector<package> pkgs;
+
+ for (const package_name& pn: pns)
+ {
+ auto it (status_cache_.find (pn));
+ assert (it != status_cache_.end () && it->second);
+
+ const package_status& ps (*it->second);
+
+ // At first it may seem we don't need to do anything for already fully
+ // installed packages. But it's possible some of them were automatically
+ // installed, meaning that they can be automatically removed if they no
+ // longer have any dependents (see apt-mark(8) for details). Which in
+ // turn means that things may behave differently depending on whether
+ // we've installed a package ourselves or if it was already installed.
+ // So instead we are going to also pass the already fully installed
+ // packages which will make sure they are all set to manually installed.
+ // But we must be careful not to force their upgrade. To achieve this
+ // we will specify the installed version as the desired version.
+ //
+ // Note also that for partially/not installed we don't specify the
+ // version, expecting the candidate version to be installed.
+ //
+ bool fi (ps.status == package_status::installed);
+
+ for (const package_policy& pp: ps.package_policies)
+ {
+ string n (pp.name);
+ string v (fi ? pp.installed_version : string ());
+
+ auto i (find_if (pkgs.begin (), pkgs.end (),
+ [&n] (const package& p)
+ {
+ return p.name == n;
+ }));
+
+ if (i != pkgs.end ())
+ {
+ if (i->version.empty ())
+ i->version = move (v);
+ else
+ // Feels like this cannot happen since we always use the installed
+ // version of the package.
+ //
+ // NOTE(review): if a fully installed and a not fully installed
+ // bpkg package were to share a Debian package component, v would
+ // be empty here and this assert would fire -- confirm this
+ // cannot happen.
+ //
+ assert (i->version == v);
+ }
+ else
+ pkgs.push_back (package {move (n), move (v)});
+ }
+ }
+
+ // Install.
+ //
+ {
+ // Convert to the `apt-get install` <pkg>[=<ver>] form.
+ //
+ strings specs;
+ specs.reserve (pkgs.size ());
+ for (const package& p: pkgs)
+ {
+ string s (p.name);
+ if (!p.version.empty ())
+ {
+ s += '=';
+ s += p.version;
+ }
+ specs.push_back (move (s));
+ }
+
+ apt_get_install (specs);
+ }
+
+ // Verify that versions we have promised in status() match what actually
+ // got installed.
+ //
+ {
+ vector<package_policy> pps;
+
+ // Here we just check the main package component of each package.
+ //
+ for (const package_name& pn: pns)
+ {
+ const package_status& ps (*status_cache_.find (pn)->second);
+
+ if (find_if (pps.begin (), pps.end (),
+ [&ps] (const package_policy& pp)
+ {
+ return pp.name == ps.system_name;
+ }) == pps.end ())
+ {
+ pps.push_back (package_policy (ps.system_name));
+ }
+ }
+
+ apt_cache_policy (pps);
+
+ for (const package_name& pn: pns)
+ {
+ const package_status& ps (*status_cache_.find (pn)->second);
+
+ auto i (find_if (pps.begin (), pps.end (),
+ [&ps] (const package_policy& pp)
+ {
+ return pp.name == ps.system_name;
+ }));
+ assert (i != pps.end ());
+
+ const package_policy& pp (*i);
+
+ if (pp.installed_version != ps.system_version)
+ {
+ fail << "unexpected " << os_release.name_id << " package version "
+ << "for " << ps.system_name <<
+ info << "expected: " << ps.system_version <<
+ info << "installed: " << pp.installed_version <<
+ info << "consider retrying the bpkg command";
+ }
+ }
+ }
+ }
+
+ // Map non-system bpkg package to system package name(s) and version.
+ //
+ // This is used both to map the package being generated and its
+ // dependencies. What should we do with extras returned in package_status?
+ // We can't really generate any of them (which files would we place in
+ // them?) nor can we list them as dependencies (we don't know their system
+ // versions). So it feels like the only sensible choice is to ignore extras.
+ //
+ // In a sense, we have a parallel arrangement going on here: binary packages
+ // that we generate don't have extras (i.e., they include everything
+ // necessary in the "standard" packages from the main group) and when we
+ // punch a system dependency based on a non-system bpkg package, we assume
+ // it was generated by us and thus doesn't have any extras. Or, to put it
+ // another way, if you want the system dependency to refer to a "native"
+ // system package with extras you need to configure it as a system bpkg
+ // package.
+ //
+ // In fact, this extends to package names. For example, unless custom
+ // mapping is specified, we will generate libsqlite3 and libsqlite3-dev
+ // while native names are libsqlite3-0 and libsqlite3-dev. While this
+ // duality is not ideal, presumably we will normally only be producing our
+ // binary packages if there are no suitable native packages. And for a few
+ // exceptions (e.g., our package is "better" in some way, such as configured
+ // differently or fixes a critical bug), we will just have to provide
+ // appropriate manual mapping that makes sure the names match (the extras is
+ // still a potential problem though -- we will only have them as
+ // dependencies if we build against a native system package; maybe we can
+ // add them manually with an option).
+ //
+ package_status system_package_manager_debian::
+ map_package (const package_name& pn,
+ const version& pv,
+ const available_packages& aps,
+ const optional<string>& build_metadata) const
+ {
+ // We should only have one available package corresponding to this package
+ // name/version.
+ //
+ assert (aps.size () == 1);
+
+ const shared_ptr<available_package>& ap (aps.front ().first);
+ const lazy_shared_ptr<repository_fragment>& rf (aps.front ().second);
+
+ // Without explicit type, the best we can do in trying to detect whether
+ // this is a library is to check for the lib prefix. Libraries without the
+ // lib prefix and non-libraries with the lib prefix (both of which we do
+ // not recommend) will have to provide a manual mapping (or explicit
+ // type).
+ //
+ const string& pt (ap->effective_type ());
+
+ strings ns (system_package_names (aps,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids,
+ false /* native */));
+ package_status r;
+ if (ns.empty ())
+ {
+ // Automatically translate our package name similar to the consumption
+ // case above. Except here we don't attempt to deduce main from -dev,
+ // naturally.
+ //
+ const string& n (pn.string ());
+
+ if (pt == "lib")
+ r = package_status (n, n + "-dev");
+ else
+ r = package_status (n);
+ }
+ else
+ {
+ // Even though we only pass one available package, we may still end up
+ // with multiple mappings. In this case we take the first, per the
+ // documentation.
+ //
+ r = parse_name_value (pt,
+ ns.front (),
+ false /* need_doc */,
+ false /* need_dbg */);
+
+ // If this is -dev without main, then derive main by stripping the -dev
+ // suffix. This feels tighter than just using the bpkg package name.
+ //
+ if (r.main.empty ())
+ {
+ assert (!r.dev.empty ());
+ r.main.assign (r.dev, 0, r.dev.size () - 4);
+ }
+ }
+
+ // Map the version.
+ //
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REPRODUCED IN THE BPKG MANUAL.
+ //
+ // To recap, a Debian package version has the following form:
+ //
+ // [<epoch>:]<upstream>[-<revision>]
+ //
+ // For details on the ordering semantics, see the Version control file
+ // field documentation in the Debian Policy Manual. While overall
+ // unsurprising, one notable exception is `~`, which sorts before anything
+ // else and is commonly used for upstream pre-releases. For example,
+ // 1.0~beta1~svn1245 sorts earlier than 1.0~beta1, which sorts earlier
+ // than 1.0.
+ //
+ // There are also various special version conventions (such as all the
+ // revision components in 1.4-5+deb10u1~bpo9u1) but they all appear to
+ // express relationships between native packages and/or their upstream and
+ // thus do not apply to our case.
+ //
+ // Ok, so how do we map our version to that? To recap, the bpkg version
+ // has the following form:
+ //
+ // [+<epoch>-]<upstream>[-<prerel>][+<revision>]
+ //
+ // Let's start with the case where neither distribution nor upstream
+ // version is specified and we need to derive everything from the bpkg
+ // version.
+ //
+ // <epoch>
+ //
+ // On one hand, if we keep the epoch, it won't necessarily match
+ // Debian's native package epoch. But on the other it will allow our
+ // binary packages from different epochs to co-exist. Seeing that this
+ // can be easily overridden with a custom distribution version, let's
+ // keep it.
+ //
+ // Note that while the Debian start/default epoch is 0, ours is 1 (we
+ // use the 0 epoch for stub packages). So we will need to shift this
+ // value range.
+ //
+ //
+ // <upstream>[-<prerel>]
+ //
+ // Our upstream version maps naturally to Debian's. That is, our
+ // upstream version format/semantics is a subset of Debian's.
+ //
+ // If this is a pre-release, then we could fail (that is, don't allow
+ // pre-releases) but then we won't be able to test on pre-release
+ // packages, for example, to make sure the name mapping is correct.
+ // Plus sometimes it's useful to publish pre-releases. We could ignore
+ // it, but then such packages will be indistinguishable from each other
+ // and the final release, which is not ideal. On the other hand, Debian
+ // has the mechanism (`~`) which is essentially meant for this, so let's
+ // use it. We will use <prerel> as is since its format is the same as
+ // upstream and thus should map naturally.
+ //
+ //
+ // <revision>
+ //
+ // Similar to epoch, our revision won't necessarily match Debian's
+ // native package revision. But on the other hand it will allow us to
+ // establish a correspondence between source and binary packages. Plus,
+ // upgrades between binary package revisions will be handled naturally.
+ // Seeing that we allow overriding the revision with a custom
+ // distribution version (see below), let's keep it.
+ //
+ // Note also that both Debian and our revision start/default is 0.
+ // However, it is Debian's convention to start revision from 1. But it
+ // doesn't seem worth it for us to do any shifting here and so we will
+ // use our revision as is.
+ //
+ // Another related question is whether we should also include some
+ // metadata that identifies the distribution and its version that this
+ // package is for. The strongest precedent here is probably Ubuntu's
+ // PPA. While there doesn't appear to be a consistent approach, one can
+ // often see versions like these:
+ //
+ // 2.1.0-1~ppa0~ubuntu14.04.1,
+ // 1.4-5-1.2.1~ubuntu20.04.1~ppa1
+ // 22.12.2-0ubuntu1~ubuntu23.04~ppa1
+ //
+ // Seeing that this is a non-sortable component (what in semver would be
+ // called "build metadata"), using `~` is probably not the worst choice.
+ //
+ // So we follow this lead and add the ~<name_id><version_id> component
+ // to revision. Note that this also means we will have to make the 0
+ // revision explicit. For example:
+ //
+ // 1.2.3-1~debian10
+ // 1.2.3-0~ubuntu20.04
+ //
+ // The next case to consider is when we have the upstream version
+ // (upstream-version manifest value). After some rumination it feels
+ // correct to use it in place of the <epoch>-<upstream> components in the
+ // above mapping (upstream version itself cannot have epoch). In other
+ // words, we will add the pre-release and revision components from the
+ // bpkg version. If this is not the desired semantics, then it can always
+ // be overridden with the distribution version.
+ //
+ // Finally, we have the distribution version. The Debian <epoch> and
+ // <upstream> components are straightforward: they should be specified by
+ // the distribution version as required. This leaves pre-release and
+ // revision. It feels like in most cases we would want these copied over
+ // from the bpkg version automatically -- it's too tedious and error-
+ // prone to maintain them manually. However, we want the user to have the
+ // full override ability. So instead, if empty revision is specified, as
+ // in 1.2.3-, then we automatically add the bpkg revision. Similarly, if
+ // empty pre-release is specified, as in 1.2.3~, then we add the bpkg
+ // pre-release. To add both automatically, we would specify 1.2.3~- (other
+ // combinations are 1.2.3~b.1- and 1.2.3~-1).
+ //
+ // Note also that per the Debian version specification, if upstream
+ // contains `:` and/or `-`, then epoch and/or revision must be specified
+ // explicitly, respectively. Note that the bpkg upstream version may not
+ // contain either.
+ //
+ string& sv (r.system_version);
+
+ // Note: build metadata that is specified but empty means omit the build
+ // metadata component altogether (see its handling below).
+ //
+ bool no_build_metadata (build_metadata && build_metadata->empty ());
+
+ if (optional<string> ov = system_package_version (ap,
+ rf,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids))
+ {
+ string& dv (*ov);
+ size_t n (dv.size ());
+
+ // Find the revision and pre-release positions, if any.
+ //
+ size_t rp (dv.rfind ('-'));
+ size_t pp (dv.rfind ('~', rp));
+
+ // Copy over the [<epoch>:]<upstream> part.
+ //
+ sv.assign (dv, 0, pp < rp ? pp : rp);
+
+ // Add pre-release copying over the bpkg version value if empty.
+ //
+ if (pp != string::npos)
+ {
+ if (size_t pn = (rp != string::npos ? rp : n) - (pp + 1))
+ {
+ sv.append (dv, pp, pn + 1);
+ }
+ else
+ {
+ if (pv.release)
+ {
+ assert (!pv.release->empty ()); // Cannot be earliest special.
+ sv += '~';
+ sv += *pv.release;
+ }
+ }
+ }
+
+ // Add revision copying over the bpkg version value if empty.
+ //
+ // Omit the default -0 revision if we have no build metadata.
+ //
+ if (rp != string::npos)
+ {
+ if (size_t rn = n - (rp + 1))
+ {
+ sv.append (dv, rp, rn + 1);
+ }
+ else if (pv.revision || !no_build_metadata)
+ {
+ sv += '-';
+ sv += to_string (pv.revision ? *pv.revision : 0);
+ }
+ }
+ else if (!no_build_metadata)
+ sv += "-0"; // Default revision (for build metadata; see below).
+ }
+ else
+ {
+ if (ap->upstream_version)
+ {
+ const string& uv (*ap->upstream_version);
+
+ // Add explicit epoch if upstream contains `:`.
+ //
+ // Note that we don't need to worry about `-` since we always add
+ // revision (see below).
+ //
+ if (uv.find (':') != string::npos)
+ sv = "0:";
+
+ sv += uv;
+ }
+ else
+ {
+ // Add epoch unless maps to 0.
+ //
+ assert (pv.epoch != 0); // Cannot be a stub.
+ if (pv.epoch != 1)
+ {
+ sv = to_string (pv.epoch - 1);
+ sv += ':';
+ }
+
+ sv += pv.upstream;
+ }
+
+ // Add pre-release.
+ //
+ if (pv.release)
+ {
+ assert (!pv.release->empty ()); // Cannot be earliest special.
+ sv += '~';
+ sv += *pv.release;
+ }
+
+ // Add revision.
+ //
+ if (pv.revision || !no_build_metadata)
+ {
+ sv += '-';
+ sv += to_string (pv.revision ? *pv.revision : 0);
+ }
+ }
+
+ // Add build metadata.
+ //
+ if (!no_build_metadata)
+ {
+ sv += '~';
+
+ if (!build_metadata)
+ {
+ sv += os_release.name_id;
+ sv += os_release.version_id; // Could be empty.
+ }
+ else
+ {
+ const string& md (*build_metadata);
+
+ // A leading/trailing `+` means insert the default <name_id><version_id>
+ // component after/before the user-specified metadata, respectively.
+ //
+ bool f (md.front () == '+');
+ bool b (md.back () == '+');
+
+ if (f && b) // Note: covers just `+`.
+ fail << "invalid build metadata '" << md << "'";
+
+ if (f || b)
+ {
+ if (b)
+ sv.append (md, 0, md.size () - 1);
+
+ sv += os_release.name_id;
+ sv += os_release.version_id;
+
+ if (f)
+ sv.append (md, 1, md.size () - 1);
+ }
+ else
+ sv += md;
+ }
+ }
+
+ return r;
+ }
+
+ // Some background on creating Debian packages (for a bit more detailed
+ // overview see the Debian Packaging Tutorial).
+ //
+ // A binary Debian package (.deb) is an ar archive which itself contains a
+ // few tar archives normally compressed with gz or xz. So it's possible to
+ // create the package completely manually without using any of the Debian
+ // tools and while some implementations (for example, cargo-deb) do it this
+ // way, we are not going to go this route because it does not scale well to
+ // more complex packages which may require additional functionality (such as
+ // managing systemd files) and which is covered by the Debian tools (for an
+ // example of where this leads, see the partial debhelper re-implementation
+ // in cargo-deb). Another issue with this approach is that it's not
+ // amenable to customizations, at least not in a way familiar to Debian
+ // users.
+ //
+ // At the lowest level of the Debian tools for creating packages sits the
+ // dpkg-deb --build|-b command (also accessible as dpkg --build|-b). Given a
+ // directory with all the binary package contents (including the package
+ // metadata, such as the control file, in the debian/ subdirectory) this
+ // command will pack everything up into a .deb file. While an improvement
+ // over the fully manual packaging, this approach has essentially the same
+ // drawbacks. In particular, this command generates a single package which
+ // means we will have to manually sort out things into -dev, -doc, etc.
+ //
+ // Next up the stack is dpkg-buildpackage. This tool expects the package to
+ // follow the Debian way of packaging, that is, to provide the debian/rules
+ // makefile with a number of required targets which it then invokes to
+ // build, install, and pack a package from source (and sometime during this
+ // process it calls dpkg-deb --build). The dpkg-buildpackage(1) man page has
+ // an overview of all the steps that this command performs and it is the
+ // recommended, lower-level, way to build packages on Debian.
+ //
+ // At the top of the stack sits debuild which calls dpkg-buildpackage, then
+ // lintian, and finally debsign (though signing can also be performed by
+ // dpkg-buildpackage itself).
+ //
+ // Based on this our plan is to use dpkg-buildpackage which brings us to the
+ // Debian way of packaging with debian/rules at its core. As it turns out,
+ // it can also be implemented in a number of alternative ways. So let's
+ // discuss those.
+ //
+ // As mentioned earlier, debian/rules is a makefile that is expected to
+ // provide a number of targets, such as build, install, etc. And
+ // theoretically these targets can be implemented completely manually. In
+ // practice, however, the Debian way is to use the debhelper(1) packaging
+ // helper tools. For example, there are helpers for stripping binaries,
+ // compressing man pages, fixing permissions, and managing systemd files.
+ //
+ // While debhelper tools definitely simplify debian/rules, there is often
+ // still a lot of boilerplate code. So second-level helpers are often used,
+ // with the dominant option being the dh(1) command sequencer (there is also
+ // CDBS but it appears to be fading into obsolescence).
+ //
+ // Based on that our options appear to be classic debhelper and dh. Looking
+ // at the statistics, it's clear that the majority of packages (including
+ // fairly complex ones) tend to prefer dh and there is no reason for us to
+ // try to buck this trend.
+ //
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REWORDED IN BPKG-PKG-BINDIST(1).
+ //
+ // So, to sum up, the plan is to produce debian/rules that uses the dh
+ // command sequencer and then invoke dpkg-buildpackage to produce the binary
+ // package from that. While this approach is normally used to build things
+ // from source, it feels like we should be able to pretend that we are.
+ // Specifically, we can override the install target to invoke the build
+ // system and install all the packages directly from their bpkg locations.
+ //
+ // Note that the -dbgsym packages are generated by default and all we need
+ // to do from our side is to compile with debug information (-g), failing
+ // which we get a warning from debhelper.
+ //
+ // Note: this setup requires dpkg-dev (or build-essential) and debhelper
+ // packages.
+ //
+ auto system_package_manager_debian::
+ generate (const packages& pkgs,
+ const packages& deps,
+ const strings& vars,
+ const dir_path& cfg_dir,
+ const package_manifest& pm,
+ const string& pt,
+ const small_vector<language, 1>& langs,
+ optional<bool> recursive_full,
+ bool first) -> binary_files
+ {
+ tracer trace ("system_package_manager_debian::generate");
+
+ assert (!langs.empty ()); // Should be effective.
+
+ // We require explicit output root.
+ //
+ if (!ops_->output_root_specified ())
+ fail << "output root directory must be specified explicitly with "
+ << "--output-root|-o";
+
+ const dir_path& out (ops_->output_root ()); // Cannot be empty.
+
+ optional<string> build_metadata;
+ if (ops_->debian_build_meta_specified ())
+ build_metadata = ops_->debian_build_meta ();
+
+ const shared_ptr<selected_package>& sp (pkgs.front ().selected);
+ const package_name& pn (sp->name);
+ const version& pv (sp->version);
+
+ // Use version without iteration in paths, etc (`#` breaks dpkg
+ // machinery).
+ //
+ string pvs (pv.string (false /* ignore_revision */,
+ true /* ignore_iteration */));
+
+ const available_packages& aps (pkgs.front ().available);
+
+ bool lib (pt == "lib");
+ bool priv (ops_->private_ ()); // Private installation.
+
+ // For now we only know how to handle libraries with C-common interface
+ // languages. But we allow other implementation languages.
+ //
+ if (lib)
+ {
+ for (const language& l: langs)
+ if (!l.impl && l.name != "c" && l.name != "c++" && l.name != "cc")
+ fail << l.name << " libraries are not yet supported";
+ }
+
+ // Return true if this package uses the specified language, only as
+ // interface language if intf_only is true.
+ //
+ auto lang = [&langs] (const char* n, bool intf_only = false) -> bool
+ {
+ return find_if (langs.begin (), langs.end (),
+ [n, intf_only] (const language& l)
+ {
+ return (!intf_only || !l.impl) && l.name == n;
+ }) != langs.end ();
+ };
+
+ // As a first step, figure out the system names and version of the package
+ // we are generating and all the dependencies, diagnosing anything fishy.
+ // If the main package is not present for a dependency, then set the main
+ // package name to an empty string.
+ //
+ // Note that there should be no duplicate dependencies and we can sidestep
+ // the status cache.
+ //
+ package_status st (map_package (pn, pv, aps, build_metadata));
+
+ vector<package_status> sdeps;
+ sdeps.reserve (deps.size ());
+ for (const package& p: deps)
+ {
+ const shared_ptr<selected_package>& sp (p.selected);
+ const available_packages& aps (p.available);
+
+ package_status s;
+ if (sp->substate == package_substate::system)
+ {
+ // Note that for a system dependency the main package name is already
+ // empty if it is not present in the distribution.
+ //
+ optional<package_status> os (status (sp->name, aps));
+
+ if (!os || os->status != package_status::installed)
+ fail << os_release.name_id << " package for " << sp->name
+ << " system package is no longer installed";
+
+ // For good measure verify the mapped back version still matches
+ // configured. Note that besides the normal case (queried by the
+ // system package manager), it could have also been specified by the
+ // user as an actual version or a wildcard. Ignoring this check for a
+ // wildcard feels consistent with the overall semantics.
+ //
+ if (sp->version != wildcard_version && sp->version != os->version)
+ {
+ fail << "current " << os_release.name_id << " package version for "
+ << sp->name << " system package does not match configured" <<
+ info << "configured version: " << sp->version <<
+ info << "current version: " << os->version << " ("
+ << os->system_version << ')';
+ }
+
+ s = move (*os);
+ }
+ else
+ {
+ s = map_package (sp->name, sp->version, aps, build_metadata);
+
+ // Set the main package name to an empty string if we wouldn't be
+ // generating the main package for this dependency (binless library
+ // without the -common package).
+ //
+ assert (aps.size () == 1);
+
+ const optional<string>& t (aps.front ().first->type);
+
+ if (s.common.empty () &&
+ package_manifest::effective_type (t, sp->name) == "lib")
+ {
+ strings sos (package_manifest::effective_type_sub_options (t));
+
+ if (find (sos.begin (), sos.end (), "binless") != sos.end ())
+ s.main.clear ();
+ }
+ }
+
+ sdeps.push_back (move (s));
+ }
+
+ if (!st.dbg.empty ())
+ fail << "generation of obsolete manual -dbg packages not supported" <<
+ info << "use automatic -dbgsym packages instead";
+
+ // We override every config.install.* variable in order not to pick
+ // anything configured. Note that we add some more in the rules file
+ // below.
+ //
+ // We make use of the <project> substitution since in the recursive mode
+ // we may be installing multiple projects. Note that the <private>
+ // directory component is automatically removed if this functionality is
+ // not enabled. One side-effect of using <project> is that we will be
+ // using the bpkg package name instead of the main Debian package name.
+ // But perhaps that's correct: on Debian it's usually the source package
+ // name, which is the same. To keep things consistent we use the bpkg
+ // package name for <private> as well.
+ //
+ // Note that some libraries have what looks like architecture-specific
+ // configuration files in /usr/include/$(DEB_HOST_MULTIARCH)/ which is
+ // what we use for our config.install.include_arch location.
+ //
+ // Note: we need to quote values that contain `$` so that they don't get
+ // expanded as build2 variables in the installed_entries() call.
+ //
+ // NOTE: make sure to update .install files below if changing anything
+ // here.
+ //
+ strings config {
+ "config.install.root=/usr/",
+ "config.install.data_root=root/",
+ "config.install.exec_root=root/",
+
+ "config.install.bin=exec_root/bin/",
+ "config.install.sbin=exec_root/sbin/",
+
+ // On Debian shared libraries should not be executable. Also,
+ // libexec/ is the same as lib/ (note that executables that get
+ // installed there will still have the executable bit set).
+ //
+ "config.install.lib='exec_root/lib/$(DEB_HOST_MULTIARCH)/<private>/'",
+ "config.install.lib.mode=644",
+ "config.install.libexec=lib/<project>/",
+ "config.install.pkgconfig=lib/pkgconfig/",
+
+ "config.install.etc=/etc/",
+ "config.install.include=data_root/include/<private>/",
+ "config.install.include_arch='data_root/include/$(DEB_HOST_MULTIARCH)/<private>/'",
+ "config.install.share=data_root/share/",
+ "config.install.data=share/<private>/<project>/",
+ "config.install.buildfile=share/build2/export/<project>/",
+
+ "config.install.doc=share/doc/<private>/<project>/",
+ "config.install.legal=doc/",
+ "config.install.man=share/man/",
+ "config.install.man1=man/man1/",
+ "config.install.man2=man/man2/",
+ "config.install.man3=man/man3/",
+ "config.install.man4=man/man4/",
+ "config.install.man5=man/man5/",
+ "config.install.man6=man/man6/",
+ "config.install.man7=man/man7/",
+ "config.install.man8=man/man8/"};
+
+ config.push_back ("config.install.private=" +
+ (priv ? pn.string () : "[null]"));
+
+ // Add user-specified configuration variables last to allow them to
+ // override anything.
+ //
+ for (const string& v: vars)
+ config.push_back (v);
+
+ // Note that we can use weak install scope for the auto recursive mode
+ // since we know dependencies cannot be spread over multiple linked
+ // configurations.
+ //
+ string scope (!recursive_full || *recursive_full ? "project" : "weak");
+
+ // Get the map of files that will end up in the binary packages.
+ //
+ // Note that we are passing quoted values with $(DEB_HOST_MULTIARCH) which
+ // will be treated literally.
+ //
+ installed_entry_map ies (installed_entries (*ops_, pkgs, config, scope));
+
+ if (ies.empty ())
+ fail << "specified package(s) do not install any files";
+
+ if (verb >= 4)
+ {
+ for (const auto& p: ies)
+ {
+ diag_record dr (trace);
+ dr << "installed entry: " << p.first;
+
+ if (p.second.target != nullptr)
+ dr << " -> " << p.second.target->first; // Symlink.
+ else
+ dr << ' ' << p.second.mode;
+ }
+ }
+
+ // Installed entry directories for sorting out which files belong where.
+ //
+ // Let's tighten things up and only look in <private>/ (if specified) to
+ // make sure there is nothing stray.
+ //
+ string pd (priv ? pn.string () + '/' : "");
+
+ // NOTE: keep consistent with the config.install.* values above.
+ //
+ // We put exported buildfiles into the main package, which makes sense
+ // after some meditation: they normally contain rules and are bundled
+ // either with a tool (say, thrift), a module (say, libbuild2-thrift), or
+ // an add-on package (say, thrift-build2).
+ //
+ dir_path bindir ("/usr/bin/");
+ dir_path sbindir ("/usr/sbin/");
+ dir_path etcdir ("/etc/");
+ dir_path incdir ("/usr/include/" + pd);
+ dir_path incarchdir ("/usr/include/$(DEB_HOST_MULTIARCH)/" + pd);
+ //dir_path bfdir ("/usr/share/build2/export/");
+ dir_path libdir ("/usr/lib/$(DEB_HOST_MULTIARCH)/" + pd);
+ dir_path pkgdir (libdir / dir_path ("pkgconfig"));
+ dir_path sharedir ("/usr/share/" + pd);
+ dir_path docdir ("/usr/share/doc/" + pd);
+ dir_path mandir ("/usr/share/man/");
+
+ // As an optimization, don't generate the main and -dbgsym packages for a
+ // binless library unless it also specifies the -common package.
+ //
+ // If this is a binless library, then verify that it doesn't install any
+ // executable, library, or configuration files. Also verify that it has
+ // the -dev package.
+ //
+ bool binless (false);
+
+ if (lib)
+ {
+ assert (aps.size () == 1);
+
+ const shared_ptr<available_package>& ap (aps.front ().first);
+ strings sos (package_manifest::effective_type_sub_options (ap->type));
+
+ if (find (sos.begin (), sos.end (), "binless") != sos.end ())
+ {
+ // Verify installed files.
+ //
+ auto bad_install = [&pn, &pv] (const string& w)
+ {
+ fail << "binless library " << pn << ' ' << pv << " installs " << w;
+ };
+
+ auto verify_not_installed = [&ies, &bad_install] (const dir_path& d)
+ {
+ auto p (ies.find_sub (d));
+ if (p.first != p.second)
+ bad_install (p.first->first.string ());
+ };
+
+ verify_not_installed (bindir);
+ verify_not_installed (sbindir);
+
+ // It would probably be better not to fail here but generate the main
+ // package instead (as we do if the -common package is also being
+ // generated). Then, however, it would not be easy to detect if a
+ // dependency has the main package or not (see sdeps initialization
+ // for details).
+ //
+ verify_not_installed (etcdir);
+
+ for (auto p (ies.find_sub (libdir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+
+ if (!f.sub (pkgdir))
+ bad_install (f.string ());
+ }
+
+ // Verify packages.
+ //
+ if (st.dev.empty ())
+ fail << "binless library " << pn << ' ' << pv << " doesn't have "
+ << os_release.name_id << " -dev package";
+
+ binless = true;
+ }
+ }
+
+ bool gen_main (!binless || !st.common.empty ());
+
+ // If we don't generate the main package (and thus the -common package),
+ // then fail if there are any data files installed. It would probably be
+ // better not to fail but generate the main package instead in this
+ // case. Then, however, it would not be easy to detect if a dependency has
+ // the main package or not.
+ //
+ if (!gen_main)
+ {
+ // Note: covers bfdir.
+ //
+ for (auto p (ies.find_sub (sharedir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+
+ if (!f.sub (docdir) && !f.sub (mandir))
+ {
+ fail << "binless library " << pn << ' ' << pv << " installs " << f <<
+ info << "consider specifying -common package in explicit "
+ << os_release.name_id << " name mapping in package manifest";
+ }
+ }
+ }
+
+ if (verb >= 3)
+ {
+ auto print_status = [] (diag_record& dr,
+ const package_status& s,
+ const string& main)
+ {
+ dr << (main.empty () ? "" : " ") << main
+ << (s.dev.empty () ? "" : " ") << s.dev
+ << (s.doc.empty () ? "" : " ") << s.doc
+ << (s.dbg.empty () ? "" : " ") << s.dbg
+ << (s.common.empty () ? "" : " ") << s.common
+ << ' ' << s.system_version;
+ };
+
+ {
+ diag_record dr (trace);
+ dr << "package:";
+ print_status (dr, st, gen_main ? st.main : empty_string);
+ }
+
+ for (const package_status& st: sdeps)
+ {
+ diag_record dr (trace);
+ dr << "dependency:";
+ print_status (dr, st, st.main);
+ }
+ }
+
+ // Start assembling the package "source" directory.
+ //
+ // It's hard to predict all the files that will be generated (and
+ // potentially read), so we will just require a clean output directory.
+ //
+ // Also, by default, we are going to keep all the intermediate files on
+ // failure for troubleshooting.
+ //
+ if (first && exists (out) && !empty (out))
+ {
+ if (!ops_->wipe_output ())
+ fail << "output root directory " << out << " is not empty" <<
+ info << "use --wipe-output to clean it up but be careful";
+
+ rm_r (out, false);
+ }
+
+ // Normally the source directory is called <name>-<upstream-version>
+ // (e.g., as unpacked from the source archive).
+ //
+ dir_path src (out / dir_path (pn.string () + '-' + pvs));
+ dir_path deb (src / dir_path ("debian"));
+ mk_p (deb);
+
+ // The control file.
+ //
+ // See the "Control files and their fields" chapter in the Debian Policy
+ // Manual for details (for example, which fields are mandatory).
+ //
+ // Note that we try to do a reasonably thorough job (e.g., filling in
+ // sections, etc) with the view that this can be used as a starting point
+ // for manual packaging (and perhaps we could add a mode for this in the
+ // future, call it "starting point" mode).
+ //
+ // Also note that this file supports variable substitutions (for example,
+ // ${binary:Version}) as described in deb-substvars(5). While we could do
+ // without, it is widely used in manual packages so we do the same. Note,
+ // however, that we don't use the shlibs:Depends/misc:Depends mechanism
+ // (which automatically detects dependencies) since we have an accurate
+ // set and some of them may not be system packages.
+ //
+ string homepage (pm.package_url ? pm.package_url->string () :
+ pm.url ? pm.url->string () :
+ string ());
+
+ string maintainer;
+ if (ops_->debian_maintainer_specified ())
+ maintainer = ops_->debian_maintainer ();
+ else
+ {
+ const email* e (pm.package_email ? &*pm.package_email :
+ pm.email ? &*pm.email :
+ nullptr);
+
+ if (e == nullptr)
+ fail << "unable to determine package maintainer from manifest" <<
+ info << "specify explicitly with --debian-maintainer";
+
+ // In certain places (e.g., changelog), Debian expects this to be in the
+ // `John Doe <john@example.org>` form while we often specify just the
+ // email address (e.g., to the mailing list). Try to detect such a case
+ // and complete it to the desired format.
+ //
+ if (e->find (' ') == string::npos && e->find ('@') != string::npos)
+ {
+ // Try to use comment as name, if any.
+ //
+ if (!e->comment.empty ())
+ maintainer = e->comment;
+ else
+ maintainer = pn.string () + " package maintainer";
+
+ maintainer += " <" + *e + '>';
+ }
+ else
+ maintainer = *e;
+ }
+
+ path ctrl (deb / "control");
+ try
+ {
+ ofdstream os (ctrl);
+
+ // First comes the general (source package) stanza.
+ //
+ // Note that the Priority semantics is not the same as our priority.
+ // Rather it should reflect the overall importance of the package. Our
+ // priority is more appropriately mapped to urgency in the changelog.
+ //
+ // If this is not a library, then by default we assume it's some kind of
+ // a development tool and use the devel section.
+ //
+ // Note also that we require the debhelper compatibility level 13 which
+ // has more advanced features that we rely on. Such as:
+ //
+ // - Variable substitutions in the debhelper config files.
+ //
+ string section (
+ ops_->debian_section_specified () ? ops_->debian_section () :
+ lib ? "libs" :
+ "devel");
+
+ string priority (
+ ops_->debian_priority_specified () ? ops_->debian_priority () :
+ "optional");
+
+ os << "Source: " << pn << '\n'
+ << "Section: " << section << '\n'
+ << "Priority: " << priority << '\n'
+ << "Maintainer: " << maintainer << '\n'
+ << "Standards-Version: " << "4.6.2" << '\n'
+ << "Build-Depends: " << "debhelper-compat (= 13)" << '\n'
+ << "Rules-Requires-Root: " << "no" << '\n';
+ if (!homepage.empty ())
+ os << "Homepage: " << homepage << '\n';
+ if (pm.src_url)
+ os << "Vcs-Browser: " << pm.src_url->string () << '\n';
+
+ // Then we have one or more binary package stanzas.
+ //
+ // Note that values from the source package stanza (such as Section,
+ // Priority) are used as defaults for the binary packages.
+ //
+ // We cannot easily detect architecture-independent packages (think
+ // libbutl.bash) and providing an option feels like the best we can do.
+ // Note that the value `any` means architecture-dependent while `all`
+ // means architecture-independent.
+ //
+ // The Multi-Arch hint can be `same` or `foreign`. The former means that
+ // a separate copy of the package may be installed for each architecture
+ // (e.g., library) while the latter -- that a single copy may be used by
+ // all architectures (e.g., executable, -doc, -common). Note that for
+ // some murky reasons Multi-Arch:foreign needs to be explicitly
+ // specified for Architecture:all.
+ //
+ // The Description field is quite messy: it requires both the short
+ // description (our summary) as a first line and a long description (our
+ // description) as the following lines in the multiline format.
+ // Converting our description to the Debian format is not going to be
+ // easy: it can be arbitrarily long and may not even be plain text (it's
+ // commonly the contents of the README.md file). So for now we fake it
+ // with a description of the package component. Note also that
+ // traditionally the Description field comes last.
+ //
+ string arch (ops_->debian_architecture_specified ()
+ ? ops_->debian_architecture ()
+ : "any");
+
+ string march (arch == "all" || !lib ? "foreign" : "same");
+
+ if (gen_main)
+ {
+ string depends;
+
+ auto add_depends = [&depends] (const string& v)
+ {
+ if (!depends.empty ())
+ depends += ", ";
+
+ depends += v;
+ };
+
+ if (!st.common.empty ())
+ add_depends (st.common + " (= ${binary:Version})");
+
+ for (const package_status& st: sdeps)
+ {
+ // Note that the constraints will include build metadata (e.g.,
+ // ~debian10). While it may be tempting to strip it, we cannot since
+ // the order is inverse. We could just make it empty `~`, though
+ // that will look a bit strange. But keeping it shouldn't cause any
+ // issues. Also note that the build metadata is part of the revision
+ // so we could strip the whole thing.
+ //
+ if (!st.main.empty ())
+ add_depends (st.main + " (>= " + st.system_version + ')');
+ }
+
+ if (ops_->debian_main_langdep_specified ())
+ {
+ if (!ops_->debian_main_langdep ().empty ())
+ {
+ if (!depends.empty ())
+ depends += ", ";
+
+ depends += ops_->debian_main_langdep ();
+ }
+ }
+ else
+ {
+ // Note that we are not going to add dependencies on libcN
+ // (currently libc6) or libstdc++N (currently libstdc++6) because
+ // it's not easy to determine N and they both are normally part of
+ // the base system.
+ //
+ // What about other language runtimes? Well, it doesn't seem like we
+ // can deduce those automatically so we will either have to add ad
+ // hoc support or the user will have to provide them manually with
+ // --debian-main-depends.
+ }
+
+ if (!ops_->debian_main_extradep ().empty ())
+ {
+ if (!depends.empty ())
+ depends += ", ";
+
+ depends += ops_->debian_main_extradep ();
+ }
+
+ os << '\n'
+ << "Package: " << st.main << '\n'
+ << "Architecture: " << arch << '\n'
+ << "Multi-Arch: " << march << '\n';
+ if (!depends.empty ())
+ os << "Depends: " << depends << '\n';
+ os << "Description: " << pm.summary << '\n'
+ << " This package contains the runtime files." << '\n';
+ }
+
+ if (!st.dev.empty ())
+ {
+ string depends (gen_main ? st.main + " (= ${binary:Version})" : "");
+
+ auto add_depends = [&depends] (const string& v)
+ {
+ if (!depends.empty ())
+ depends += ", ";
+
+ depends += v;
+ };
+
+ for (const package_status& st: sdeps)
+ {
+ // Doesn't look like we can distinguish between interface and
+ // implementation dependencies here. So better to over- than
+ // under-specify.
+ //
+ if (!st.dev.empty ())
+ add_depends (st.dev + " (>= " + st.system_version + ')');
+ }
+
+ if (ops_->debian_dev_langdep_specified ())
+ {
+ if (!ops_->debian_dev_langdep ().empty ())
+ {
+ add_depends (ops_->debian_dev_langdep ());
+ }
+ }
+ else
+ {
+ // Add dependency on libcN-dev and libstdc++-N-dev.
+ //
+ // Note: libcN-dev provides libc-dev and libstdc++N-dev provides
+ // libstdc++-dev. While it would be better to depend on the exact
+ // versions, determining N is not easy (and in case of libstdc++
+ // there could be multiple installed at the same time).
+ //
+ // Note that we haven't seen just libc-dev in any native packages,
+ // it's always either libc6-dev or libc6-dev|libc-dev. So we will
+ // see how it goes.
+ //
+ // If this is an undetermined C-common library, we assume it may be
+ // C++ (better to over- than under-specify).
+ //
+ bool cc (lang ("cc", true));
+ if (cc || (cc = lang ("c++", true))) add_depends ("libstdc++-dev");
+ if (cc || (cc = lang ("c", true))) add_depends ("libc-dev");
+ }
+
+ if (!ops_->debian_dev_extradep ().empty ())
+ {
+ add_depends (ops_->debian_dev_extradep ());
+ }
+
+ // Feels like the architecture should be the same as for the main
+ // package.
+ //
+ os << '\n'
+ << "Package: " << st.dev << '\n'
+ << "Section: " << (lib ? "libdevel" : "devel") << '\n'
+ << "Architecture: " << arch << '\n'
+ << "Multi-Arch: " << march << '\n';
+ if (!st.doc.empty ())
+ os << "Suggests: " << st.doc << '\n';
+ if (!depends.empty ())
+ os << "Depends: " << depends << '\n';
+ os << "Description: " << pm.summary << '\n'
+ << " This package contains the development files." << '\n';
+ }
+
+ if (!st.doc.empty ())
+ {
+ os << '\n'
+ << "Package: " << st.doc << '\n'
+ << "Section: " << "doc" << '\n'
+ << "Architecture: " << "all" << '\n'
+ << "Multi-Arch: " << "foreign" << '\n'
+ << "Description: " << pm.summary << '\n'
+ << " This package contains the documentation." << '\n';
+ }
+
+ // Keep this in case we want to support it in the "starting point" mode.
+ //
+ if (!st.dbg.empty () && !binless)
+ {
+ string depends (st.main + " (= ${binary:Version})");
+
+ os << '\n'
+ << "Package: " << st.dbg << '\n'
+ << "Section: " << "debug" << '\n'
+ << "Priority: " << "extra" << '\n'
+ << "Architecture: " << arch << '\n'
+ << "Multi-Arch: " << march << '\n';
+ if (!depends.empty ())
+ os << "Depends: " << depends << '\n';
+ os << "Description: " << pm.summary << '\n'
+ << " This package contains the debugging information." << '\n';
+ }
+
+ if (!st.common.empty ())
+ {
+ // Generally, this package is not necessarily architecture-independent
+ // (for example, it could contain something shared between multiple
+ // binary packages produced from the same source package rather than
+ // something shared between all the architectures of a binary
+ // package). But seeing that we always generate one binary package,
+ // for us it only makes sense as architecture-independent.
+ //
+ // It's also not clear what dependencies we can deduce for this
+ // package. Assuming that it depends on all the dependency -common
+ // packages is probably unreasonable.
+ //
+ os << '\n'
+ << "Package: " << st.common << '\n'
+ << "Architecture: " << "all" << '\n'
+ << "Multi-Arch: " << "foreign" << '\n'
+ << "Description: " << pm.summary << '\n'
+ << " This package contains the architecture-independent files." << '\n';
+ }
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << ctrl << ": " << e;
+ }
+
+ // The changelog file.
+ //
+ // See the "Debian changelog" section in the Debian Policy Manual for
+ // details.
+ //
+ // In particular, this is the sole source of the package version.
+ //
+ timestamp now (system_clock::now ());
+
+ path chlog (deb / "changelog");
+ try
+ {
+ ofdstream os (chlog);
+
+ // The first line has the following format:
+ //
+ // <src-package> (<version>) <distribution>; urgency=<urgency>
+ //
+ // Note that <distribution> doesn't end up in the binary package.
+ // Normally all Debian packages start in unstable or experimental.
+ //
+ string urgency;
+ switch (pm.priority ? pm.priority->value : priority::low)
+ {
+ case priority::low: urgency = "low"; break;
+ case priority::medium: urgency = "medium"; break;
+ case priority::high: urgency = "high"; break;
+ case priority::security: urgency = "critical"; break;
+ }
+
+ os << pn << " (" << st.system_version << ") "
+ << (pv.release ? "experimental" : "unstable") << "; "
+ << "urgency=" << urgency << '\n';
+
+ // Next we have a bunch of "change details" lines that start with `*`
+ // indented with two spaces. They are traditionally separated from the
+ // first and last lines with blank lines.
+ //
+ os << '\n'
+ << " * New bpkg package release " << pvs << '.' << '\n'
+ << '\n';
+
+ // The last line is the "maintainer signoff" and has the following
+ // form:
+ //
+ // -- <name> <email> <date>
+ //
+ // The <date> component shall have the following form in the English
+ // locale (Mon, Jan, etc):
+ //
+ // <day-of-week>, <dd> <month> <yyyy> <hh>:<mm>:<ss> +<zzzz>
+ //
+ os << " -- " << maintainer << " ";
+ std::locale l (os.imbue (std::locale ("C")));
+ to_stream (os,
+ now,
+ "%a, %d %b %Y %T %z",
+ false /* special */,
+ true /* local */);
+ os.imbue (l);
+ os << '\n';
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << chlog << ": " << e;
+ }
+
+ // The copyright file.
+ //
+ // See the "Machine-readable debian/copyright file" document for
+ // details.
+ //
+ // Note that while not entirely clear, it looks like there should be at
+ // least one Files stanza.
+ //
+ // Note also that there is currently no way for us to get accurate
+ // copyright information.
+ //
+ // @@ TODO: Strictly speaking, in the recursive mode, we should collect
+ // licenses of all the dependencies we are bundling.
+ //
+ path copyr (deb / "copyright");
+ try
+ {
+ ofdstream os (copyr);
+
+ string license;
+ for (const licenses& ls: pm.license_alternatives)
+ {
+ if (!license.empty ())
+ license += " or ";
+
+ for (auto b (ls.begin ()), i (b); i != ls.end (); ++i)
+ {
+ if (i != b)
+ license += " and ";
+
+ license += *i;
+ }
+ }
+
+ os << "Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/" << '\n'
+ << "Upstream-Name: " << pn << '\n'
+ << "Upstream-Contact: " << maintainer << '\n';
+ if (!homepage.empty ())
+ os << "Source: " << homepage << '\n';
+ os << "License: " << license << '\n'
+ << "Comment: See accompanying files for exact copyright information" << '\n'
+ << " and full license text(s)." << '\n';
+
+ // Note that for licenses mentioned in the Files stanza we either have
+ // to provide the license text(s) inline or as separate License stanzas.
+ //
+ os << '\n'
+ << "Files: *" << '\n'
+ << "Copyright: ";
+ to_stream (os, now, "%Y", false /* special */, true /* local */);
+ os << " the " << pn << " authors (see accompanying files for details)" << '\n'
+ << "License: " << license << '\n'
+ << " See accompanying files for full license text(s)." << '\n';
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << copyr << ": " << e;
+ }
+
+ // The source/format file.
+ //
+ dir_path deb_src (deb / dir_path ("source"));
+ mk (deb_src);
+
+ path format (deb_src / "format");
+ try
+ {
+ ofdstream os (format);
+ os << "3.0 (quilt)\n";
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << format << ": " << e;
+ }
+
+ // The rules makefile. Note that it must be executable.
+ //
+ // This file is executed by dpkg-buildpackage(1) which expects it to
+ // provide the following "API" make targets:
+ //
+ // clean
+ //
+ // build -- configure and build for all packages
+ // build-arch -- configure and build for Architecture:any packages
+ // build-indep -- configure and build for Architecture:all packages
+ //
+ // binary -- make all binary packages
+ // binary-arch -- make Architecture:any binary packages
+ // binary-indep -- make Architecture:all binary packages
+ //
+ // The dh command sequencer provides the standard implementation of these
+ // API targets with the following customization point targets (for an
+ // overview of dh, start with the slides from the "Not Your Grandpa's
+ // Debhelper" presentation at DebConf 9 followed by the dh(1) man page):
+ //
+ // override_dh_auto_configure # ./configure --prefix=/usr
+ // override_dh_auto_build # make
+ // override_dh_auto_test # make test
+ // override_dh_auto_install # make install
+ // override_dh_auto_clean # make distclean
+ //
+ // Note that pretty much any dh_xxx command invoked by dh in order to
+ // implement the API targets can be customized with the corresponding
+ // override_dh_xxx target. To see what commands are executed for an API
+ // target, run `dh <target> --no-act`.
+ //
+ path rules (deb / "rules");
+ try
+ {
+ bool lang_c (lang ("c"));
+ bool lang_cxx (lang ("c++"));
+ bool lang_cc (lang ("cc"));
+
+ // See fdopen() for details (umask, etc).
+ //
+ permissions ps (permissions::ru | permissions::wu | permissions::xu |
+ permissions::rg | permissions::wg | permissions::xg |
+ permissions::ro | permissions::wo | permissions::xo);
+ ofdstream os (fdopen (rules,
+ fdopen_mode::out | fdopen_mode::create,
+ ps));
+
+ os << "#!/usr/bin/make -f\n"
+ << "# -*- makefile -*-\n"
+ << '\n';
+
+ // See debhelper(7) for details on these.
+ //
+ // Note that there is also the DEB_BUILD_OPTIONS=terse option. Perhaps
+ // for the "starting point" mode we should base DH_* values as well as
+ // the build system verbosity below on that value. See debian/rules in
+ // upstream mariadb for what looks like a sensible setup.
+ //
+ if (verb == 0)
+ os << "export DH_QUIET := 1\n"
+ << '\n';
+ else if (verb == 1)
+ os << "# Uncomment this to turn on verbose mode.\n"
+ << "#export DH_VERBOSE := 1\n"
+ << '\n';
+ else
+ os << "export DH_VERBOSE := 1\n"
+ << '\n';
+
+ // We could have instead called dpkg-architecture directly but seeing
+ // that we also include buildflags.mk below, might as well use
+ // architecture.mk (in the packages that we sampled you see both
+ // approaches). Note that these come in the dpkg-dev package, the same
+ // as dpkg-buildpackage.
+ //
+ os << "# DEB_HOST_* (DEB_HOST_MULTIARCH, etc)" << '\n'
+ << "#" << '\n'
+ << "include /usr/share/dpkg/architecture.mk" << '\n'
+ << '\n';
+
+ if (ops_->debian_buildflags () != "ignore")
+ {
+ // While we could have called dpkg-buildflags directly, including
+ // buildflags.mk instead appears to be the standard practice.
+ //
+ // Note that these flags are not limited to C-based languages (for
+ // example, they also cover Assembler, Fortran, and potentially others
+ // in the future).
+ //
+ string mo; // Include leading space if not empty.
+ if (ops_->debian_maint_option_specified ())
+ {
+ for (const string& o: ops_->debian_maint_option ())
+ {
+ if (!o.empty ())
+ {
+ mo += ' ';
+ mo += o;
+ }
+ }
+ }
+ else
+ mo = " hardening=+all";
+
+ os << "# *FLAGS (CFLAGS, CXXFLAGS, etc)" << '\n'
+ << "#" << '\n'
+ << "export DEB_BUILD_MAINT_OPTIONS :=" << mo << '\n'
+ << "include /usr/share/dpkg/buildflags.mk" << '\n'
+ << '\n';
+
+ if (!binless)
+ {
+ // Fixup -ffile-prefix-map option (if specified) which is used to
+ // strip source file path prefix in debug information (besides other
+ // places). By default it points to the source directory. We change
+ // it to point to the bpkg configuration directory. Note that this
+ // won't work for external packages with source out of configuration
+ // (e.g., managed by bdep).
+ //
+ if (lang_c || lang_cc)
+ {
+ // @@ TODO: OBJCFLAGS.
+
+ os << "CFLAGS := $(patsubst -ffile-prefix-map=%,-ffile-prefix-map="
+ << cfg_dir.string () << "=.,$(CFLAGS))" << '\n'
+ << '\n';
+ }
+
+ if (lang_cxx || lang_cc)
+ {
+ // @@ TODO: OBJCXXFLAGS.
+
+ os << "CXXFLAGS := $(patsubst -ffile-prefix-map=%,-ffile-prefix-map="
+ << cfg_dir.string () << "=.,$(CXXFLAGS))" << '\n'
+ << '\n';
+ }
+ }
+ }
+
+ // For a binless library the -dbgsym package is not supposed to be
+ // generated. Thus, we disable its automatic generation by adding the
+ // noautodbgsym flag to the DEB_BUILD_OPTIONS variable.
+ //
+ // This doesn't seem to be necessary (probably because there is no
+ // .so/.a).
+ //
+#if 0
+ if (binless)
+ os << "export DEB_BUILD_OPTIONS += noautodbgsym" << '\n'
+ << '\n';
+#endif
+
+ // The debian/tmp/ subdirectory appears to be the canonical destination
+ // directory (see dh_auto_install(1) for details).
+ //
+ os << "DESTDIR := $(CURDIR)/debian/tmp" << '\n'
+ << '\n';
+
+ // Let's use absolute path to the build system driver in case we are
+ // invoked with altered environment or some such.
+ //
+ // See --jobs documentation in dpkg-buildpackage(1) for details on
+ // parallel=N.
+ //
+ // Note: should be consistent with the invocation in installed_entries()
+ // above.
+ //
+ cstrings verb_args; string verb_arg;
+ map_verb_b (*ops_, verb_b::normal, verb_args, verb_arg);
+
+ os << "b := " << search_b (*ops_).effect_string ();
+ for (const char* o: verb_args) os << ' ' << o;
+ for (const string& o: ops_->build_option ()) os << ' ' << o;
+ os << '\n'
+ << '\n'
+ << "parallel := $(filter parallel=%,$(DEB_BUILD_OPTIONS))" << '\n'
+ << "ifneq ($(parallel),)" << '\n'
+ << " parallel := $(patsubst parallel=%,%,$(parallel))" << '\n'
+ << " ifeq ($(parallel),1)" << '\n'
+ << " b += --serial-stop" << '\n'
+ << " else" << '\n'
+ << " b += --jobs=$(parallel)" << '\n'
+ << " endif" << '\n'
+ << "endif" << '\n'
+ << '\n';
+
+ // Configuration variables.
+ //
+ // Note: we need to quote values that contain `<>`, `[]`, since they
+ // will be passed through shell. For simplicity, let's just quote
+ // everything.
+ //
+ os << "config := config.install.chroot='$(DESTDIR)/'" << '\n'
+ << "config += config.install.sudo='[null]'" << '\n';
+
+ // If this is a C-based language, add rpath for private installation.
+ //
+ if (priv && (lang_c || lang_cxx || lang_cc))
+ os << "config += config.bin.rpath='/usr/lib/$(DEB_HOST_MULTIARCH)/"
+ << pn << "/'" << '\n';
+
+ // Add build flags.
+ //
+ if (ops_->debian_buildflags () != "ignore")
+ {
+ const string& m (ops_->debian_buildflags ());
+
+ string o (m == "assign" ? "=" :
+ m == "append" ? "+=" :
+ m == "prepend" ? "=+" : "");
+
+ if (o.empty ())
+ fail << "unknown --debian-buildflags option value '" << m << "'";
+
+ // Note that config.cc.* doesn't play well with the append/prepend
+ // modes because the orders are:
+ //
+ // x.poptions cc.poptions
+ // cc.coptions x.coptions
+ // cc.loptions x.loptions
+ //
+ // Oh, well, hopefully it will be close enough for most cases.
+ //
+ // Note also that there are compiler mode options that are not
+ // overridden.
+ //
+ if (o == "=" && (lang_c || lang_cxx || lang_cc))
+ {
+ os << "config += config.cc.poptions='[null]'" << '\n'
+ << "config += config.cc.coptions='[null]'" << '\n'
+ << "config += config.cc.loptions='[null]'" << '\n';
+ }
+
+ if (lang_c || lang_cc)
+ {
+ // @@ TODO: OBJCFLAGS (we currently don't have separate options).
+ // Also see -ffile-prefix-map fixup above.
+
+ os << "config += config.c.poptions" << o << "'$(CPPFLAGS)'" << '\n'
+ << "config += config.c.coptions" << o << "'$(CFLAGS)'" << '\n'
+ << "config += config.c.loptions" << o << "'$(LDFLAGS)'" << '\n';
+ }
+
+ if (lang_cxx || lang_cc)
+ {
+ // @@ TODO: OBJCXXFLAGS (we currently don't have separate options).
+ // Also see -ffile-prefix-map fixup above.
+
+ os << "config += config.cxx.poptions" << o << "'$(CPPFLAGS)'" << '\n'
+ << "config += config.cxx.coptions" << o << "'$(CXXFLAGS)'" << '\n'
+ << "config += config.cxx.loptions" << o << "'$(LDFLAGS)'" << '\n';
+ }
+
+ // @@ TODO: ASFLAGS (when we have assembler support).
+ }
+
+ // Keep last to allow user-specified configuration variables to override
+ // anything.
+ //
+ for (const string& c: config)
+ {
+ // Quote the value unless already quoted (see above). Presence of
+ // potentially-quoted user variables complicates things a bit (can
+ // be partially quoted, double-quoted, etc).
+ //
+ size_t p (c.find_first_of ("=+ \t")); // End of name.
+ if (p != string::npos)
+ {
+ p = c.find_first_not_of ("=+ \t", p); // Beginning of value.
+ if (p != string::npos)
+ {
+ if (c.find_first_of ("'\"", p) == string::npos) // Not quoted.
+ {
+ os << "config += " << string (c, 0, p) << '\''
+ << string (c, p) << "'\n";
+ continue;
+ }
+ }
+ }
+
+ os << "config += " << c << '\n';
+ }
+
+ os << '\n';
+
+ // List of packages we need to install.
+ //
+ for (auto b (pkgs.begin ()), i (b); i != pkgs.end (); ++i)
+ {
+ os << "packages" << (i == b ? " := " : " += ")
+ << i->out_root.representation () << '\n';
+ }
+ os << '\n';
+
+ // Default to the dh command sequencer.
+ //
+ // Note that passing --buildsystem=none doesn't seem to make any
+ // difference (other than add some noise).
+ //
+ os << "%:\n"
+ << '\t' << "dh $@" << '\n'
+ << '\n';
+
+ // Override dh_auto_configure.
+ //
+ os << "# Everything is already configured.\n"
+ << "#\n"
+ << "override_dh_auto_configure:\n"
+ << '\n';
+
+ // Override dh_auto_build.
+ //
+ os << "override_dh_auto_build:\n"
+ << '\t' << "$b $(config) update-for-install: $(packages)" << '\n'
+ << '\n';
+
+ // Override dh_auto_test.
+ //
+ // Note that running tests after update-for-install may cause rebuild
+ // (e.g., relinking without rpath, etc) before tests and again before
+ // install. So doesn't seem worth the trouble.
+ //
+ os << "# Assume any testing has already been done.\n"
+ << "#\n"
+ << "override_dh_auto_test:\n"
+ << '\n';
+
+ // Override dh_auto_install.
+ //
+ os << "override_dh_auto_install:\n"
+ << '\t' << "$b $(config) '!config.install.scope=" << scope << "' "
+ << "install: $(packages)" << '\n'
+ << '\n';
+
+ // Override dh_auto_clean.
+ //
+ os << "# This is not a real source directory so nothing to clean.\n"
+ << "#\n"
+ << "override_dh_auto_clean:\n"
+ << '\n';
+
+ // Override dh_shlibdeps.
+ //
+ // Failing that, we get a warning about calculated ${shlibs:Depends} being
+ // unused.
+ //
+ // Note that there is also dh_makeshlibs which is invoked just before
+ // but we shouldn't override it because (quoting its man page): "It will
+ // also ensure that ldconfig is invoked during install and removal when
+ // it finds shared libraries."
+ //
+ os << "# Disable dh_shlibdeps since we don't use ${shlibs:Depends}.\n"
+ << "#\n"
+ << "override_dh_shlibdeps:\n"
+ << '\n';
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << rules << ": " << e;
+ }
+
+ // Generate the dh_install (.install) config files for each package in
+ // order to sort out which files belong where.
+ //
+ // For documentation of the config file format see debhelper(1) and
+ // dh_install(1). But the summary is:
+ //
+ // - Supports only simple wildcards (?, *, [...]; no recursive/**).
+ // - But can install whole directories recursively.
+ // - An entry that doesn't match anything is an error (say, /usr/sbin/*).
+ // - Supports variable substitutions (${...}; since compat level 13).
+ //
+ // Keep in mind that wherever there is <project> in the config.install.*
+ // variable, we can end up with multiple different directories (bundled
+ // packages).
+ //
+ path main_install;
+ path dev_install;
+ path doc_install;
+ path dbg_install;
+ path common_install;
+
+ const path* cur_install (nullptr); // File being opened/written to.
+ try
+ {
+ ofdstream main_os;
+ ofdstream dev_os;
+ ofdstream doc_os;
+ ofdstream dbg_os;
+ ofdstream com_os;
+
+ pair<path&, ofdstream&> main (main_install, main_os);
+ pair<path&, ofdstream&> dev (dev_install, dev_os);
+ pair<path&, ofdstream&> doc (doc_install, doc_os);
+ pair<path&, ofdstream&> dbg (dbg_install, dbg_os);
+ pair<path&, ofdstream&> com (common_install, com_os);
+
+ auto open = [&deb, &cur_install] (pair<path&, ofdstream&>& os,
+ const string& n)
+ {
+ if (!n.empty ())
+ {
+ cur_install = &(os.first = deb / (n + ".install"));
+ os.second.open (os.first);
+ }
+ };
+
+ open (main, gen_main ? st.main : empty_string);
+ open (dev, st.dev);
+ open (doc, st.doc);
+ open (dbg, st.dbg);
+ open (com, st.common);
+
+ auto is_open = [] (pair<path&, ofdstream&>& os)
+ {
+ return os.second.is_open ();
+ };
+
+ auto add = [&cur_install] (pair<path&, ofdstream&>& os, const path& p)
+ {
+ // Strip root.
+ //
+ string s (p.leaf (p.root_directory ()).string ());
+
+ // Replace () with {}.
+ //
+ for (char& c: s)
+ {
+ if (c == '(') c = '{';
+ if (c == ')') c = '}';
+ }
+
+ cur_install = &os.first;
+ os.second << s << '\n';
+ };
+
+ // The main package contains everything that doesn't go to other
+ // packages.
+ //
+ if (gen_main)
+ {
+ if (ies.contains_sub (bindir)) add (main, bindir / "*");
+ if (ies.contains_sub (sbindir)) add (main, sbindir / "*");
+
+ // This could potentially go to -common but it could also be target-
+ // specific, who knows. So let's keep it in main for now.
+ //
+ if (ies.contains_sub (etcdir)) add (main, etcdir / "*");
+ }
+
+ if (!is_open (dev))
+ {
+ assert (gen_main); // Shouldn't be here otherwise.
+
+ if (ies.contains_sub (incdir)) add (main, incdir / "*");
+ if (ies.contains_sub (incarchdir)) add (main, incarchdir / "*");
+ if (ies.contains_sub (libdir)) add (main, libdir / "*");
+ }
+ else
+ {
+ if (ies.contains_sub (incdir)) add (dev, incdir / "*");
+ if (ies.contains_sub (incarchdir)) add (dev, incarchdir / "*");
+
+ // Ok, time for things to get hairy: we need to split the contents
+ // of lib/ into the main and -dev packages. The -dev package should
+ // contain three things:
+ //
+ // 1. Static libraries (.a).
+ // 2. Non-versioned shared library symlinks (.so).
+ // 3. Contents of the pkgconfig/ subdirectory.
+ //
+ // Everything else should go into the main package. In particular, we
+ // assume any subdirectories other than pkgconfig/ are the libexec
+ // stuff or similar.
+ //
+ // The (2) case (shared library) is tricky. Here we can have three
+ // plausible arrangements:
+ //
+ // A. Portably-versioned library:
+ //
+ // libfoo-1.2.so
+ // libfoo.so -> libfoo-1.2.so
+ //
+ // B. Natively-versioned library:
+ //
+ // libfoo.so.1.2.3
+ // libfoo.so.1.2 -> libfoo.so.1.2.3
+ // libfoo.so.1 -> libfoo.so.1.2
+ // libfoo.so -> libfoo.so.1
+ //
+ // C. Non-versioned library:
+ //
+ // libfoo.so
+ //
+ // Note that in the (C) case the library should go into the main
+ // package. Based on this, the criterion appears to be straightforward:
+ // the extension is .so and it's a symlink. For good measure we also
+ // check that there is the `lib` prefix (plugins, etc).
+ //
+ for (auto p (ies.find_sub (libdir)); p.first != p.second; )
+ {
+ const path& f (p.first->first);
+ const installed_entry& ie ((p.first++)->second);
+
+ path l (f.leaf (libdir));
+
+ if (l.simple ())
+ {
+ assert (gen_main); // Shouldn't be here otherwise.
+
+ string e (l.extension ());
+ const string& n (l.string ());
+
+ bool d (n.size () > 3 && n.compare (0, 3, "lib") == 0 &&
+ ((e == "a" ) ||
+ (e == "so" && ie.target != nullptr)));
+
+ add (d ? dev : main, libdir / l);
+ }
+ else
+ {
+ // Let's keep things tidy and use a wildcard rather than listing
+ // all the entries in subdirectories verbatim.
+ //
+ dir_path d (libdir / dir_path (*l.begin ()));
+
+ // Can only be a subdirectory of pkgdir/ if the main package is
+ // not being generated.
+ //
+ assert (d == pkgdir || gen_main);
+
+ add (d == pkgdir ? dev : main, d / "*");
+
+ // Skip all the other entries in this subdirectory (in the prefix
+ // map they will all be in a contiguous range).
+ //
+ while (p.first != p.second && p.first->first.sub (d))
+ ++p.first;
+ }
+ }
+ }
+
+ // We cannot just do usr/share/* since it will clash with doc/ and man/
+ // below. So we have to list all the top-level entries in usr/share/
+ // that are not doc/ or man/.
+ //
+ if (gen_main)
+ {
+ // Note: covers bfdir.
+ //
+ for (auto p (ies.find_sub (sharedir)); p.first != p.second; )
+ {
+ const path& f ((p.first++)->first);
+
+ if (f.sub (docdir) || f.sub (mandir))
+ continue;
+
+ path l (f.leaf (sharedir));
+
+ if (l.simple ())
+ add (is_open (com) ? com : main, sharedir / l);
+ else
+ {
+ // Let's keep things tidy and use a wildcard rather than listing
+ // all the entries in subdirectories verbatim.
+ //
+ dir_path d (sharedir / dir_path (*l.begin ()));
+
+ add (is_open (com) ? com : main, d / "*");
+
+ // Skip all the other entries in this subdirectory (in the prefix
+ // map they will all be in a contiguous range).
+ //
+ while (p.first != p.second && p.first->first.sub (d))
+ ++p.first;
+ }
+ }
+ }
+
+ // Should we put the documentation into -common if there is no -doc?
+ // While there doesn't seem to be anything explicit in the policy, there
+ // are packages that do it this way (e.g., libao, libaudit). And the
+ // same logic seems to apply to -dev (e.g., zlib).
+ //
+ {
+ auto& os (is_open (doc) ? doc :
+ is_open (com) ? com :
+ is_open (dev) ? dev :
+ main);
+
+ // We can only add doc files to the main or -common packages if we
+ // generate the main package.
+ //
+ assert ((&os != &main && &os != &com) || gen_main);
+
+ if (ies.contains_sub (docdir)) add (os, docdir / "*");
+ if (ies.contains_sub (mandir)) add (os, mandir / "*");
+ }
+
+ // Close.
+ //
+ auto close = [&cur_install] (pair<path&, ofdstream&>& os)
+ {
+ if (os.second.is_open ())
+ {
+ cur_install = &os.first;
+ os.second.close ();
+ }
+ };
+
+ close (main);
+ close (dev);
+ close (doc);
+ close (dbg);
+ close (com);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << *cur_install << ": " << e;
+ }
+
+ // Run dpkg-buildpackage.
+ //
+ // Note that there doesn't seem to be any way to control its verbosity or
+ // progress.
+ //
+ // Note also that dpkg-buildpackage causes recompilation on every run by
+ // changing the SOURCE_DATE_EPOCH environment variable (which we track for
+ // changes since it affects GCC). Note that since we don't have this
+ // SOURCE_DATE_EPOCH during dry-run caused by installed_entries(), there
+ // would be a recompilation even if the value weren't changing.
+ //
+ cstrings args {
+ "dpkg-buildpackage",
+ "--build=binary", // Only build binary packages.
+ "--no-sign", // Do not sign anything.
+ "--target-arch", arch.c_str ()};
+
+ // Pass our --jobs value, if any.
+ //
+ string jobs_arg;
+ if (size_t n = ops_->jobs_specified () ? ops_->jobs () : 0)
+ {
+ // Note: only accepts the --jobs=N form.
+ //
+ args.push_back ((jobs_arg = "--jobs=" + to_string (n)).c_str ());
+ }
+
+ // Pass any additional options specified by the user.
+ //
+ for (const string& o: ops_->debian_build_option ())
+ args.push_back (o.c_str ());
+
+ args.push_back (nullptr);
+
+ if (ops_->debian_prepare_only ())
+ {
+ if (verb >= 1)
+ {
+ diag_record dr (text);
+
+ dr << "prepared " << src <<
+ text << "command line: ";
+
+ print_process (dr, args);
+ }
+
+ return binary_files {};
+ }
+
+ try
+ {
+ process_path pp (process::path_search (args[0]));
+ process_env pe (pp, src /* cwd */);
+
+ // There is going to be quite a bit of diagnostics so print the command
+ // line unless quiet.
+ //
+ if (verb >= 1)
+ print_process (pe, args);
+
+ // Redirect stdout to stderr since half of dpkg-buildpackage diagnostics
+ // goes there. For good measure also redirect stdin to /dev/null to make
+ // sure there are no prompts of any kind.
+ //
+ process pr (pp,
+ args,
+ -2 /* stdin */,
+ 2 /* stdout */,
+ 2 /* stderr */,
+ pe.cwd->string ().c_str (),
+ pe.vars);
+
+ if (!pr.wait ())
+ {
+ // Let's repeat the command line even if it was printed at the
+ // beginning to save the user a rummage through the logs.
+ //
+ diag_record dr (fail);
+ dr << args[0] << " exited with non-zero code" <<
+ info << "command line: "; print_process (dr, pe, args);
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ // Cleanup intermediate files unless requested not to.
+ //
+ if (!ops_->keep_output ())
+ {
+ rm_r (src);
+ }
+
+ // Collect and return the binary package paths.
+ //
+ binary_files r;
+ r.system_version = st.system_version;
+
+ auto add = [&out, &r] (const string& f,
+ const char* t,
+ const string& n,
+ bool opt = false)
+ {
+ path p (out / f);
+
+ if (exists (p))
+ r.push_back (binary_file {t, move (p), n});
+ else if (!opt)
+ fail << "expected output file " << f << " does not exist";
+ };
+
+ // The resulting .deb file names have the <name>_<version>_<arch>.deb
+ // form. If the package is architecture-independent, then <arch> is the
+ // special `all` value.
+ //
+ const string& v (st.system_version);
+ const string& a (arch);
+
+ if (gen_main)
+ add (st.main + '_' + v + '_' + a + ".deb", "main.deb", st.main);
+
+ if (!binless)
+ add (st.main + "-dbgsym_" + v + '_' + a + ".deb",
+ "dbgsym.deb",
+ st.main + "-dbgsym",
+ true);
+
+ if (!st.dev.empty ())
+ add (st.dev + '_' + v + '_' + a + ".deb", "dev.deb", st.dev);
+
+ if (!st.doc.empty ())
+ add (st.doc + '_' + v + "_all.deb", "doc.deb", st.doc);
+
+ if (!st.common.empty ())
+ add (st.common + '_' + v + "_all.deb", "common.deb", st.common);
+
+ // Besides the binary packages (.deb) we also get the .buildinfo and
+ // .changes files, which could be useful. Note that their names are based
+ // on the source package name.
+ //
+ add (pn.string () + '_' + v + '_' + a + ".buildinfo", "buildinfo", "");
+ add (pn.string () + '_' + v + '_' + a + ".changes", "changes", "");
+
+ return r;
+ }
+}
diff --git a/bpkg/system-package-manager-debian.hxx b/bpkg/system-package-manager-debian.hxx
new file mode 100644
index 0000000..336f7a7
--- /dev/null
+++ b/bpkg/system-package-manager-debian.hxx
@@ -0,0 +1,271 @@
+// file : bpkg/system-package-manager-debian.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_SYSTEM_PACKAGE_MANAGER_DEBIAN_HXX
+#define BPKG_SYSTEM_PACKAGE_MANAGER_DEBIAN_HXX
+
+#include <map>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/system-package-manager.hxx>
+
+namespace bpkg
+{
+ // The system package manager implementation for Debian and alike (Ubuntu,
+ // etc) using the apt frontend (specifically, apt-get and apt-cache) for
+ // consumption and the dpkg-buildpackage/debhelper/dh tooling for
+ // production.
+ //
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REPRODUCED IN THE BPKG MANUAL.
+ //
+ // For background, a library in Debian is normally split up into several
+ // packages: the shared library package (e.g., libfoo1 where 1 is the ABI
+ // version), the development files package (e.g., libfoo-dev), the
+ // documentation files package (e.g., libfoo-doc), the debug symbols package
+ // (e.g., libfoo1-dbg), and the (usually) architecture-independent files
+ // (e.g., libfoo1-common). All the packages except -dev are optional and
+ // there is quite a bit of variability here. Here are a few examples:
+ //
+ // libsqlite3-0 libsqlite3-dev
+ //
+ // libssl1.1 libssl-dev libssl-doc
+ // libssl3 libssl-dev libssl-doc
+ //
+ // libcurl4 libcurl4-openssl-dev libcurl4-doc
+ // libcurl3-gnutls libcurl4-gnutls-dev libcurl4-doc (yes, 3 and 4)
+ //
+ // Note that while most library package names in Debian start with lib (per
+ // the policy), there are exceptions (e.g., zlib1g zlib1g-dev). The
+ // header-only library package names may or may not start with lib and end
+ // with -dev (e.g., libeigen3-dev, rapidjson-dev, catch2).
+ //
+ // Also note that manual -dbg packages are obsolete in favor of automatic
+ // -dbgsym packages from Debian 9. So while we support -dbg for consumption,
+ // we only generate -dbgsym.
+ //
+ // Based on that, it seems our best bet when trying to automatically map our
+ // library package name to Debian package names is to go for the -dev
+ // package first and figure out the shared library package from that based
+ // on the fact that the -dev package should have the == dependency on the
+ // shared library package with the same version and its name should normally
+ // start with the -dev package's stem.
+ //
+  // For executable packages there are normally no -dev packages but -dbg,
+ // -doc, and -common are plausible.
+ //
+ // The format of the debian-name (or alike) manifest value is a comma-
+ // separated list of one or more package groups:
+ //
+ // <package-group> [, <package-group>...]
+ //
+ // Where each <package-group> is the space-separated list of one or more
+ // package names:
+ //
+ // <package-name> [ <package-name>...]
+ //
+ // All the packages in the group should be "package components" (for the
+ // lack of a better term) of the same "logical package", such as -dev, -doc,
+ // -common packages. They normally have the same version.
+ //
+ // The first group is called the main group and the first package in the
+ // group is called the main package. Note that all the groups are consumed
+ // (installed) but only the main group is produced (packaged).
+ //
+ // We allow/recommend specifying the -dev package instead of the main
+ // package for libraries (the bpkg package name starts with lib), seeing
+ // that we are capable of detecting the main package automatically. If the
+ // library name happens to end with -dev (which poses an ambiguity), then
+ // the -dev package should be specified explicitly as the second package to
+ // disambiguate this situation (if a non-library name happened to start with
+ // lib and end with -dev, well, you are out of luck, I guess).
+ //
+ // Note also that for now we treat all the packages from the non-main groups
+ // as extras but in the future we may decide to sort them out like the main
+ // group (see parse_name_value() for details).
+ //
+ // The Debian package version has the [<epoch>:]<upstream>[-<revision>] form
+ // (see deb-version(5) for details). If no explicit mapping to the bpkg
+ // version is specified with the debian-to-downstream-version (or alike)
+  // manifest values or none match, then we fall back to using the <upstream>
+ // part as the bpkg version. If explicit mapping is specified, then we match
+ // it against the [<epoch>:]<upstream> parts ignoring <revision>.
+ //
+ struct system_package_status_debian: system_package_status
+ {
+ string main;
+ string dev;
+ string doc;
+ string dbg;
+ string common;
+ strings extras;
+
+ // The `apt-cache policy` output.
+ //
+ struct package_policy
+ {
+ string name;
+ string installed_version; // Empty if none.
+ string candidate_version; // Empty if none and no installed_version.
+
+ explicit
+ package_policy (string n): name (move (n)) {}
+ };
+
+ vector<package_policy> package_policies;
+ size_t package_policies_main = 0; // Size of the main group.
+
+ explicit
+ system_package_status_debian (string m, string d = {})
+ : main (move (m)), dev (move (d))
+ {
+ assert (!main.empty () || !dev.empty ());
+ }
+
+ system_package_status_debian () = default;
+ };
+
+ class system_package_manager_debian: public system_package_manager
+ {
+ public:
+ virtual optional<const system_package_status*>
+ status (const package_name&, const available_packages*) override;
+
+ virtual void
+ install (const vector<package_name>&) override;
+
+ virtual binary_files
+ generate (const packages&,
+ const packages&,
+ const strings&,
+ const dir_path&,
+ const package_manifest&,
+ const string&,
+ const small_vector<language, 1>&,
+ optional<bool>,
+ bool) override;
+
+ public:
+ // Expect os_release::name_id to be "debian" or os_release::like_ids to
+ // contain "debian".
+ //
+ // @@ TODO: we currently don't handle non-host arch in consumption.
+ //
+ system_package_manager_debian (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ optional<size_t> fetch_timeout,
+ bool install,
+ bool fetch,
+ bool yes,
+ string sudo)
+ : system_package_manager (move (osr),
+ h,
+ a.empty () ? arch_from_target (h) : move (a),
+ progress,
+ fetch_timeout,
+ install,
+ fetch,
+ yes,
+ move (sudo)) {}
+
+ // Note: options can only be NULL when testing functions that don't need
+ // them.
+ //
+ system_package_manager_debian (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ const pkg_bindist_options* ops)
+ : system_package_manager (move (osr),
+ h,
+ a.empty () ? arch_from_target (h) : move (a),
+ progress),
+ ops_ (ops) {}
+
+ // Implementation details exposed for testing (see definitions for
+ // documentation).
+ //
+ public:
+ using package_status = system_package_status_debian;
+ using package_policy = package_status::package_policy;
+
+ void
+ apt_cache_policy (vector<package_policy>&, size_t = 0);
+
+ string
+ apt_cache_show (const string&, const string&);
+
+ void
+ apt_get_update ();
+
+ void
+ apt_get_install (const strings&);
+
+ pair<cstrings, const process_path&>
+ apt_get_common (const char*, strings& args_storage);
+
+ static package_status
+ parse_name_value (const string&, const string&, bool, bool);
+
+ static string
+ main_from_dev (const string&, const string&, const string&);
+
+ static string
+ arch_from_target (const target_triplet&);
+
+ package_status
+ map_package (const package_name&,
+ const version&,
+ const available_packages&,
+ const optional<string>&) const;
+
+ // If simulate is not NULL, then instead of executing the actual apt-cache
+ // and apt-get commands simulate their execution: (1) for apt-cache by
+ // printing their command lines and reading the results from files
+ // specified in the below apt_cache_* maps and (2) for apt-get by printing
+ // their command lines and failing if requested.
+ //
+ // In the (1) case if the corresponding map entry does not exist or the
+ // path is empty, then act as if the specified package/version is
+ // unknown. If the path is special "-" then read from stdin. For apt-cache
+ // different post-fetch and (for policy) post-install results can be
+ // specified (if the result is not found in one of the later maps, the
+ // previous map is used as a fallback). Note that the keys in the
+ // apt_cache_policy_* maps are the package sets and the corresponding
+ // result file is expected to contain (or not) the results for all of
+ // them. See apt_cache_policy() and apt_cache_show() implementations for
+ // details on the expected results.
+ //
+ struct simulation
+ {
+ std::map<strings, path> apt_cache_policy_;
+ std::map<strings, path> apt_cache_policy_fetched_;
+ std::map<strings, path> apt_cache_policy_installed_;
+
+ std::map<pair<string, string>, path> apt_cache_show_;
+ std::map<pair<string, string>, path> apt_cache_show_fetched_;
+
+ bool apt_get_update_fail_ = false;
+ bool apt_get_install_fail_ = false;
+ };
+
+ const simulation* simulate_ = nullptr;
+
+ private:
+ optional<system_package_status_debian>
+ status (const package_name&, const available_packages&);
+
+ private:
+ bool fetched_ = false; // True if already fetched metadata.
+ bool installed_ = false; // True if already installed.
+
+ std::map<package_name, optional<system_package_status_debian>> status_cache_;
+
+ const pkg_bindist_options* ops_ = nullptr; // Only for production.
+ };
+}
+
+#endif // BPKG_SYSTEM_PACKAGE_MANAGER_DEBIAN_HXX
diff --git a/bpkg/system-package-manager-debian.test.cxx b/bpkg/system-package-manager-debian.test.cxx
new file mode 100644
index 0000000..c85b231
--- /dev/null
+++ b/bpkg/system-package-manager-debian.test.cxx
@@ -0,0 +1,386 @@
+// file : bpkg/system-package-manager-debian.test.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager-debian.hxx>
+
+#include <map>
+#include <iostream>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#undef NDEBUG
+#include <cassert>
+
+#include <bpkg/system-package-manager.test.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ using package_status = system_package_status_debian;
+ using package_policy = package_status::package_policy;
+
+ using butl::manifest_parser;
+ using butl::manifest_parsing;
+
+ // Usage: args[0] <command> ...
+ //
+ // Where <command> is one of:
+ //
+ // apt-cache-policy <pkg>... result comes from stdin
+ //
+ // apt-cache-show <pkg> <ver> result comes from stdin
+ //
+ // parse-name-value <pkg> debian-name value from stdin
+ //
+ // main-from-dev <dev-pkg> <dev-ver> depends comes from stdin
+ //
+ // map-package [<build-metadata>] manifest comes from stdin
+ //
+ // build <query-pkg>... [--install [--no-fetch] <install-pkg>...]
+ //
+ // The stdin of the build command is used to read the simulation description
+ // which consists of lines in the following forms (blanks are ignored):
+ //
+ // manifest: <query-pkg> <file>
+ //
+ // Available package manifest for one of <query-pkg>. If none is
+ // specified, then a stub is automatically added.
+ //
+ // apt-cache-policy[-{fetched,installed}]: <sys-pkg>... <file>
+ //
+ // Values for simulation::apt_cache_policy_*. If <file> is the special `!`
+ // value, then make the entry empty.
+ //
+ // apt-cache-show[-fetched]: <sys-pkg> <sys-ver> <file>
+ //
+ // Values for simulation::apt_cache_show_*. If <file> is the special `!`
+ // value, then make the entry empty.
+ //
+ // apt-get-update-fail: true
+ // apt-get-install-fail: true
+ //
+ // Values for simulation::apt_get_{update,install}_fail_.
+ //
+ int
+ main (int argc, char* argv[])
+ try
+ {
+ assert (argc >= 2); // <command>
+
+ string cmd (argv[1]);
+
+ // @@ TODO: add option to customize? Maybe option before command?
+ //
+ os_release osr {"debian", {}, "10", "", "Debian", "", ""};
+
+ if (cmd == "apt-cache-policy")
+ {
+ assert (argc >= 3); // <pkg>...
+
+ strings key;
+ vector<package_policy> pps;
+ for (int i (2); i != argc; ++i)
+ {
+ key.push_back (argv[i]);
+ pps.push_back (package_policy (argv[i]));
+ }
+
+ system_package_manager_debian::simulation s;
+ s.apt_cache_policy_.emplace (move (key), path ("-"));
+
+ system_package_manager_debian m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ false /* install */,
+ false /* fetch */,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ m.apt_cache_policy (pps);
+
+ for (const package_policy& pp: pps)
+ {
+ cout << pp.name << " '"
+ << pp.installed_version << "' '"
+ << pp.candidate_version << "'\n";
+ }
+ }
+ else if (cmd == "apt-cache-show")
+ {
+ assert (argc == 4); // <pkg> <ver>
+
+ pair<string, string> key (argv[2], argv[3]);
+
+ system_package_manager_debian::simulation s;
+ s.apt_cache_show_.emplace (key, path ("-"));
+
+ system_package_manager_debian m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ false /* install */,
+ false /* fetch */,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ cout << m.apt_cache_show (key.first, key.second) << '\n';
+ }
+ else if (cmd == "parse-name-value")
+ {
+ assert (argc == 3); // <pkg>
+
+ package_name pn (argv[2]);
+ string pt (package_manifest::effective_type (nullopt, pn));
+
+ string v;
+ getline (cin, v);
+
+ package_status s (
+ system_package_manager_debian::parse_name_value (pt, v, false, false));
+
+ if (!s.main.empty ()) cout << "main: " << s.main << '\n';
+ if (!s.dev.empty ()) cout << "dev: " << s.dev << '\n';
+ if (!s.doc.empty ()) cout << "doc: " << s.doc << '\n';
+ if (!s.dbg.empty ()) cout << "dbg: " << s.dbg << '\n';
+ if (!s.common.empty ()) cout << "common: " << s.common << '\n';
+ if (!s.extras.empty ())
+ {
+ cout << "extras:";
+ for (const string& e: s.extras)
+ cout << ' ' << e;
+ cout << '\n';
+ }
+ }
+ else if (cmd == "main-from-dev")
+ {
+ assert (argc == 4); // <dev-pkg> <dev-ver>
+
+ string n (argv[2]);
+ string v (argv[3]);
+ string d;
+ getline (cin, d);
+
+ cout << system_package_manager_debian::main_from_dev (n, v, d) << '\n';
+ }
+ else if (cmd == "map-package")
+ {
+ assert (argc >= 2 && argc <= 3); // [<build-metadata>]
+
+ optional<string> bm;
+ if (argc > 2)
+ bm = argv[2];
+
+ available_packages aps;
+ aps.push_back (make_available_from_manifest ("", "-"));
+
+ const package_name& n (aps.front ().first->id.name);
+ const version& v (aps.front ().first->version);
+
+ system_package_manager_debian m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullptr /* options */);
+
+ package_status s (m.map_package (n, v, aps, bm));
+
+ cout << "version: " << s.system_version << '\n'
+ << "main: " << s.main << '\n';
+ if (!s.dev.empty ()) cout << "dev: " << s.dev << '\n';
+ if (!s.doc.empty ()) cout << "doc: " << s.doc << '\n';
+ if (!s.dbg.empty ()) cout << "dbg: " << s.dbg << '\n';
+ if (!s.common.empty ()) cout << "common: " << s.common << '\n';
+ }
+ else if (cmd == "build")
+ {
+ assert (argc >= 3); // <query-pkg>...
+
+ strings qps;
+ map<string, available_packages> aps;
+
+ // Parse <query-pkg>...
+ //
+ int argi (2);
+ for (; argi != argc; ++argi)
+ {
+ string a (argv[argi]);
+
+ if (a.compare (0, 2, "--") == 0)
+ break;
+
+ aps.emplace (a, available_packages {});
+ qps.push_back (move (a));
+ }
+
+ // Parse --install [--no-fetch]
+ //
+ bool install (false);
+ bool fetch (true);
+
+ for (; argi != argc; ++argi)
+ {
+ string a (argv[argi]);
+
+ if (a == "--install") install = true;
+ else if (a == "--no-fetch") fetch = false;
+ else break;
+ }
+
+ // Parse the description.
+ //
+ system_package_manager_debian::simulation s;
+
+ for (string l; !eof (getline (cin, l)); )
+ {
+ if (l.empty ())
+ continue;
+
+ size_t p (l.find (':')); assert (p != string::npos);
+ string k (l, 0, p);
+
+ if (k == "manifest")
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ auto i (aps.find (n));
+ if (i == aps.end ())
+ fail << "unknown package " << n << " in '" << l << "'";
+
+ i->second.push_back (make_available_from_manifest (n, f));
+ }
+ else if (
+ map<strings, path>* policy =
+ k == "apt-cache-policy" ? &s.apt_cache_policy_ :
+ k == "apt-cache-policy-fetched" ? &s.apt_cache_policy_fetched_ :
+ k == "apt-cache-policy-installed" ? &s.apt_cache_policy_installed_ :
+ nullptr)
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ strings ns;
+ for (size_t b (0), e (0); next_word (n, b, e); )
+ ns.push_back (string (n, b, e - b));
+
+ if (f == "!")
+ f.clear ();
+
+ policy->emplace (move (ns), path (move (f)));
+ }
+ else if (map<pair<string, string>, path>* show =
+ k == "apt-cache-show" ? &s.apt_cache_show_ :
+ k == "apt-cache-show-fetched" ? &s.apt_cache_show_fetched_ :
+ nullptr)
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ q = n.find (' '); assert (q != string::npos);
+ pair<string, string> nv (string (n, 0, q), string (n, q + 1));
+ trim (nv.second);
+
+ if (f == "!")
+ f.clear ();
+
+ show->emplace (move (nv), path (move (f)));
+ }
+ else if (k == "apt-get-update-fail")
+ {
+ s.apt_get_update_fail_ = true;
+ }
+ else if (k == "apt-get-install-fail")
+ {
+ s.apt_get_install_fail_ = true;
+ }
+ else
+ fail << "unknown keyword '" << k << "' in simulation description";
+ }
+
+ // Fallback to stubs and sort in the version descending order.
+ //
+ for (pair<const string, available_packages>& p: aps)
+ {
+ if (p.second.empty ())
+ p.second.push_back (make_available_stub (p.first));
+
+ sort_available (p.second);
+ }
+
+ system_package_manager_debian m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ install,
+ fetch,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ // Query each package.
+ //
+ for (const string& n: qps)
+ {
+ package_name pn (n);
+
+ const system_package_status* s (*m.status (pn, &aps[n]));
+
+ assert (*m.status (pn, nullptr) == s); // Test caching.
+
+ if (s == nullptr)
+ fail << "no installed " << (install ? "or available " : "")
+ << "system package for " << pn;
+
+ cout << pn << ' ' << s->version
+ << " (" << s->system_name << ' ' << s->system_version << ") ";
+
+ switch (s->status)
+ {
+ case package_status::installed: cout << "installed"; break;
+ case package_status::partially_installed: cout << "part installed"; break;
+ case package_status::not_installed: cout << "not installed"; break;
+ }
+
+ cout << '\n';
+ }
+
+ // Install if requested.
+ //
+ if (install)
+ {
+ assert (argi != argc); // <install-pkg>...
+
+ vector<package_name> ips;
+ for (; argi != argc; ++argi)
+ ips.push_back (package_name (argv[argi]));
+
+ m.install (ips);
+ }
+ }
+ else
+ fail << "unknown command '" << cmd << "'";
+
+ return 0;
+ }
+ catch (const failed&)
+ {
+ return 1;
+ }
+}
+
+int
+main (int argc, char* argv[])
+{
+ return bpkg::main (argc, argv);
+}
diff --git a/bpkg/system-package-manager-debian.test.testscript b/bpkg/system-package-manager-debian.test.testscript
new file mode 100644
index 0000000..56c6785
--- /dev/null
+++ b/bpkg/system-package-manager-debian.test.testscript
@@ -0,0 +1,1177 @@
+# file : bpkg/system-package-manager-debian.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: apt-cache-policy
+:
+{
+ test.arguments += apt-cache-policy
+
+ : basics
+ :
+ $* libssl3 libssl1.1 libssl-dev libsqlite5 libxerces-c-dev <<EOI 2>>EOE >>EOO
+ libssl3:
+ Installed: 3.0.7-1
+ Candidate: 3.0.7-2
+ Version table:
+ 3.0.7-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 3.0.7-1 100
+ 100 /var/lib/dpkg/status
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ libssl-dev:
+ Installed: 3.0.7-1
+ Candidate: 3.0.7-2
+ Version table:
+ 3.0.7-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 3.0.7-1 100
+ 100 /var/lib/dpkg/status
+ libxerces-c-dev:
+ Installed: (none)
+ Candidate: 3.2.4+debian-1
+ Version table:
+ 3.2.4+debian-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ LC_ALL=C apt-cache policy --quiet libssl3 libssl1.1 libssl-dev libsqlite5 libxerces-c-dev <-
+ EOE
+ libssl3 '3.0.7-1' '3.0.7-2'
+ libssl1.1 '1.1.1n-0+deb11u3' '1.1.1n-0+deb11u3'
+ libssl-dev '3.0.7-1' '3.0.7-2'
+ libsqlite5 '' ''
+ libxerces-c-dev '' '3.2.4+debian-1'
+ EOO
+
+ : empty
+ :
+ $* libsqlite5 <:'' 2>>EOE >>EOO
+ LC_ALL=C apt-cache policy --quiet libsqlite5 <-
+ EOE
+ libsqlite5 '' ''
+ EOO
+
+ : none-none
+ :
+ $* pulseaudio <<EOI 2>>EOE >>EOO
+ pulseaudio:
+ Installed: (none)
+ Candidate: (none)
+ Version table:
+ 1:11.1-1ubuntu7.5 -1
+ 500 http://au.archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages
+ 1:11.1-1ubuntu7 -1
+ 500 http://au.archive.ubuntu.com/ubuntu bionic/main amd64 Packages
+ EOI
+ LC_ALL=C apt-cache policy --quiet pulseaudio <-
+ EOE
+ pulseaudio '' ''
+ EOO
+}
+
+: apt-cache-show
+:
+{
+ test.arguments += apt-cache-show
+
+ # Note: put Depends last to test folded/multiline parsing.
+ #
+ : basics
+ :
+ $* libssl1.1 1.1.1n-0+deb11u3 <<EOI 2>>EOE >>EOO
+ Package: libssl1.1
+ Status: install ok installed
+ Priority: optional
+ Section: libs
+ Installed-Size: 4120
+ Maintainer: Debian OpenSSL Team <pkg-openssl-devel@lists.alioth.debian.org>
+ Architecture: amd64
+ Multi-Arch: same
+ Source: openssl
+ Version: 1.1.1n-0+deb11u3
+ Breaks: isync (<< 1.3.0-2), lighttpd (<< 1.4.49-2), python-boto (<< 2.44.0-1.1), python-httplib2 (<< 0.11.3-1), python-imaplib2 (<< 2.57-5), python3-boto (<< 2.44.0-1.1), python3-imaplib2 (<< 2.57-5)
+ Description: Secure Sockets Layer toolkit - shared libraries
+ This package is part of the OpenSSL project's implementation of the SSL
+ and TLS cryptographic protocols for secure communication over the
+ Internet.
+ .
+ It provides the libssl and libcrypto shared libraries.
+ Description-md5: 88547c6206c7fbc4fcc7d09ce100d210
+ Homepage: https://www.openssl.org/
+ Depends: libc6 (>= 2.25), debconf (>= 0.5) | debconf-2.0
+
+ EOI
+ LC_ALL=C apt-cache show --quiet libssl1.1=1.1.1n-0+deb11u3 <-
+ EOE
+ libc6 (>= 2.25), debconf (>= 0.5) | debconf-2.0
+ EOO
+
+ : no-depends
+ :
+ $* libssl1.1 1.1.1n-0+deb11u3 <<EOI 2>>EOE >''
+ Package: libssl1.1
+ Status: install ok installed
+ Priority: optional
+ Section: libs
+ Installed-Size: 4120
+ Maintainer: Debian OpenSSL Team <pkg-openssl-devel@lists.alioth.debian.org>
+ Architecture: amd64
+ Multi-Arch: same
+ Source: openssl
+ Version: 1.1.1n-0+deb11u3
+ Breaks: isync (<< 1.3.0-2), lighttpd (<< 1.4.49-2), python-boto (<< 2.44.0-1.1), python-httplib2 (<< 0.11.3-1), python-imaplib2 (<< 2.57-5), python3-boto (<< 2.44.0-1.1), python3-imaplib2 (<< 2.57-5)
+ Description: Secure Sockets Layer toolkit - shared libraries
+ This package is part of the OpenSSL project's implementation of the SSL
+ and TLS cryptographic protocols for secure communication over the
+ Internet.
+ .
+ It provides the libssl and libcrypto shared libraries.
+ Description-md5: 88547c6206c7fbc4fcc7d09ce100d210
+ Homepage: https://www.openssl.org/
+
+ EOI
+ LC_ALL=C apt-cache show --quiet libssl1.1=1.1.1n-0+deb11u3 <-
+ EOE
+}
+
+: parse-name-value
+:
+{
+ test.arguments += parse-name-value
+
+ : basics
+ :
+ $* libssl <<EOI >>EOO
+ libssl3 libssl-common libssl-doc libssl-dev libssl-dbg libssl-extras, libc6 libc-dev libc-common libc-doc, libz-dev
+ EOI
+ main: libssl3
+ dev: libssl-dev
+ doc: libssl-doc
+ dbg: libssl-dbg
+ common: libssl-common
+ extras: libssl-extras libc6 libc-dev libz-dev
+ EOO
+
+ : non-lib
+ :
+ $* sqlite3 <<EOI >>EOO
+ sqlite3 sqlite3-common sqlite3-doc
+ EOI
+ main: sqlite3
+ doc: sqlite3-doc
+ common: sqlite3-common
+ EOO
+
+ : lib-dev
+ :
+ $* libssl <<EOI >>EOO
+ libssl-dev
+ EOI
+ dev: libssl-dev
+ EOO
+
+ : non-lib-dev
+ :
+ $* ssl-dev <<EOI >>EOO
+ ssl-dev
+ EOI
+ main: ssl-dev
+ EOO
+
+ : lib-custom-dev
+ :
+ $* libfoo-dev <<EOI >>EOO
+ libfoo-dev libfoo-dev-dev
+ EOI
+ main: libfoo-dev
+ dev: libfoo-dev-dev
+ EOO
+}
+
+: main-from-dev
+:
+{
+ test.arguments += main-from-dev
+
+ : first
+ :
+ $* libssl-dev 3.0.7-1 <<EOI >'libssl3'
+ libssl3 (= 3.0.7-1), debconf (>= 0.5) | debconf-2.0
+ EOI
+
+ : not-first
+ :
+ $* libxerces-c-dev 3.2.4+debian-1 <<EOI >'libxerces-c3.2'
+ libc6-dev | libc-dev, libicu-dev, libxerces-c3.2 (= 3.2.4+debian-1)
+ EOI
+
+ : exact
+ :
+ $* libexpat1-dev 2.5.0-1 <<EOI >'libexpat1'
+ libexpat1 (= 2.5.0-1), libc6-dev | libc-dev
+ EOI
+
+ : not-stem
+ :
+ $* libcurl4-openssl-dev 7.87.0-2 <<EOI >''
+ libcurl4 (= 7.87.0-2)
+ EOI
+}
+
+: map-package
+:
+{
+ test.arguments += map-package
+
+ : default-name
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: 20210808
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808-0~debian10
+ main: byacc
+ EOO
+
+ : default-name-lib
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-0~debian10
+ main: libsqlite3
+ dev: libsqlite3-dev
+ EOO
+
+ : custom-name
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ debian_9-name: libsqlite3-0 libsqlite3-dev
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-0~debian10
+ main: libsqlite3-0
+ dev: libsqlite3-dev
+ EOO
+
+ : custom-name-dev-only
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ debian_9-name: libsqlite3-0-dev
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-0~debian10
+ main: libsqlite3-0
+ dev: libsqlite3-0-dev
+ EOO
+
+ : custom-name-non-native
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ debian_0-name: libsqlite libsqlite-dev
+ debian_9-name: libsqlite3-0 libsqlite3-dev
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-0~debian10
+ main: libsqlite
+ dev: libsqlite-dev
+ EOO
+
+ : version-upstream
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ upstream-version: 20210808
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-3~debian10
+ main: byacc
+ EOO
+
+ : version-distribution
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ debian-version: 20210808~beta.1
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-0~debian10
+ main: byacc
+ EOO
+
+ : version-distribution-epoch-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ debian-version: 1:1.2.3-2
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 1:1.2.3-2~debian10
+ main: byacc
+ EOO
+
+ : version-distribution-empty-release
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ debian-version: 20210808~-4
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-4~debian10
+ main: byacc
+ EOO
+
+ : version-distribution-empty-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ debian-version: 20210808~b.1-
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~b.1-3~debian10
+ main: byacc
+ EOO
+
+ : version-distribution-empty-release-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ debian-version: 20210808~-
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-3~debian10
+ main: byacc
+ EOO
+
+ : version-no-build-metadata
+ :
+ $* '' <<EOI >>EOO
+ : 1
+ name: byacc
+ version: 1.2.3
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 1.2.3
+ main: byacc
+ EOO
+
+ : version-distribution-no-build-metadata
+ :
+ $* '' <<EOI >>EOO
+ : 1
+ name: byacc
+ version: 1.2.3
+ debian-version: 20210808
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808
+ main: byacc
+ EOO
+}
+
+: build
+:
+{
+ test.arguments += build
+
+ : libsqlite3
+ :
+ {
+ : installed
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ cat <<EOI >=libsqlite3-dev.show;
+ Package: libsqlite3-dev
+ Version: 3.40.1-1
+ Depends: libsqlite3-0 (= 3.40.1-1), libc-dev
+ EOI
+ cat <<EOI >=libsqlite3-0.policy;
+ libsqlite3-0:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ apt-cache-show: libsqlite3-dev 3.40.1-1 libsqlite3-dev.show
+ apt-cache-policy: libsqlite3-0 libsqlite3-0.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ LC_ALL=C apt-cache show --quiet libsqlite3-dev=3.40.1-1 <libsqlite3-dev.show
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ sudo apt-get install --quiet --assume-no libsqlite3-0=3.40.1-1 libsqlite3-dev=3.40.1-1
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ EOE
+ libsqlite3 3.40.1 (libsqlite3-0 3.40.1-1) installed
+ EOO
+
+
+ : part-installed
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-dev.show;
+ Package: libsqlite3-dev
+ Version: 3.40.1-1
+ Depends: libsqlite3-0 (= 3.40.1-1), libc-dev
+ EOI
+ cat <<EOI >=libsqlite3-0.policy;
+ libsqlite3-0:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ apt-cache-show: libsqlite3-dev 3.40.1-1 libsqlite3-dev.show
+ apt-cache-policy: libsqlite3-0 libsqlite3-0.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ LC_ALL=C apt-cache show --quiet libsqlite3-dev=3.40.1-1 <libsqlite3-dev.show
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ sudo apt-get install --quiet --assume-no libsqlite3-0 libsqlite3-dev
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ EOE
+ libsqlite3 3.40.1 (libsqlite3-0 3.40.1-1) part installed
+ EOO
+
+
+ : part-installed-upgrade
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.39.4-1
+ Version table:
+ 3.39.4-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-dev.policy-fetched;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-dev.show-fetched;
+ Package: libsqlite3-dev
+ Version: 3.40.1-1
+ Depends: libsqlite3-0 (= 3.40.1-1), libc-dev
+ EOI
+ cat <<EOI >=libsqlite3-0.policy-fetched;
+ libsqlite3-0:
+ Installed: 3.39.4-1
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 3.39.4-1 100
+ 100 /var/lib/dpkg/status
+ EOI
+ cat <<EOI >=libsqlite3-0.policy-installed;
+ libsqlite3-0:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ apt-cache-policy-fetched: libsqlite3-dev libsqlite3-dev.policy-fetched
+ apt-cache-show: libsqlite3-dev 3.40.1-1 libsqlite3-dev.show-fetched
+ apt-cache-policy-fetched: libsqlite3-0 libsqlite3-0.policy-fetched
+ apt-cache-policy-installed: libsqlite3-0 libsqlite3-0.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy-fetched
+ LC_ALL=C apt-cache show --quiet libsqlite3-dev=3.40.1-1 <libsqlite3-dev.show-fetched
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy-fetched
+ sudo apt-get install --quiet --assume-no libsqlite3-0 libsqlite3-dev
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy-installed
+ EOE
+ libsqlite3 3.40.1 (libsqlite3-0 3.40.1-1) part installed
+ EOO
+
+
+ # Note that the semantics is unrealistic (maybe background apt-get update
+ # happened in between).
+ #
+ : part-installed-upgrade-version-change
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.39.4-1
+ Version table:
+ 3.39.4-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-dev.show;
+ Package: libsqlite3-dev
+ Version: 3.39.4-1
+ Depends: libsqlite3-0 (= 3.39.4-1), libc-dev
+ EOI
+ cat <<EOI >=libsqlite3-0.policy;
+ libsqlite3-0:
+ Installed: 3.39.4-1
+ Candidate: 3.39.4-1
+ Version table:
+ *** 3.39.4-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ cat <<EOI >=libsqlite3-0.policy-installed;
+ libsqlite3-0:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libsqlite3 --install --no-fetch libsqlite3 <<EOI 2>>EOE >>EOO != 0
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ apt-cache-show: libsqlite3-dev 3.39.4-1 libsqlite3-dev.show
+ apt-cache-policy: libsqlite3-0 libsqlite3-0.policy
+ apt-cache-policy-installed: libsqlite3-0 libsqlite3-0.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ LC_ALL=C apt-cache show --quiet libsqlite3-dev=3.39.4-1 <libsqlite3-dev.show
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ sudo apt-get install --quiet --assume-no libsqlite3-0 libsqlite3-dev
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy-installed
+ error: unexpected debian package version for libsqlite3-0
+ info: expected: 3.39.4-1
+ info: installed: 3.40.1-1
+ info: consider retrying the bpkg command
+ EOE
+ libsqlite3 3.39.4 (libsqlite3-0 3.39.4-1) part installed
+ EOO
+
+
+ : not-installed
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-dev.show;
+ Package: libsqlite3-dev
+ Version: 3.40.1-1
+ Depends: libsqlite3-0 (= 3.40.1-1), libc-dev
+ EOI
+ cat <<EOI >=libsqlite3-0.policy;
+ libsqlite3-0:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libsqlite3-0.policy-installed;
+ libsqlite3-0:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ apt-cache-show: libsqlite3-dev 3.40.1-1 libsqlite3-dev.show
+ apt-cache-policy: libsqlite3-0 libsqlite3-0.policy
+ apt-cache-policy-installed: libsqlite3-0 libsqlite3-0.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ LC_ALL=C apt-cache show --quiet libsqlite3-dev=3.40.1-1 <libsqlite3-dev.show
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy
+ sudo apt-get install --quiet --assume-no libsqlite3-0 libsqlite3-dev
+ LC_ALL=C apt-cache policy --quiet libsqlite3-0 <libsqlite3-0.policy-installed
+ EOE
+ libsqlite3 3.40.1 (libsqlite3-0 3.40.1-1) not installed
+ EOO
+
+
+ : no-install
+ :
+ cat <<EOI >=libsqlite3-dev.policy;
+ libsqlite3-dev:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ $* libsqlite3 <<EOI 2>>EOE != 0
+ apt-cache-policy: libsqlite3-dev libsqlite3-dev.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev <libsqlite3-dev.policy
+ error: no installed system package for libsqlite3
+ EOE
+
+
+ : not-available
+ :
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE != 0
+ apt-cache-policy: libsqlite3-dev !
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev </dev/null
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev </dev/null
+ error: no installed or available system package for libsqlite3
+ EOE
+
+
+ : not-available-no-fetch
+ :
+ $* libsqlite3 --install --no-fetch libsqlite3 <<EOI 2>>EOE != 0
+ apt-cache-policy: libsqlite3-dev !
+ EOI
+ LC_ALL=C apt-cache policy --quiet libsqlite3-dev </dev/null
+ error: no installed or available system package for libsqlite3
+ EOE
+ }
+
+ : sqlite3
+ :
+ {
+ : installed
+ :
+ cat <<EOI >=sqlite3.policy;
+ sqlite3:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* sqlite3 --install sqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: sqlite3 sqlite3.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet sqlite3 <sqlite3.policy
+ sudo apt-get install --quiet --assume-no sqlite3=3.40.1-1
+ LC_ALL=C apt-cache policy --quiet sqlite3 <sqlite3.policy
+ EOE
+ sqlite3 3.40.1 (sqlite3 3.40.1-1) installed
+ EOO
+
+ : not-installed
+ :
+ cat <<EOI >=sqlite3.policy;
+ sqlite3:
+ Installed: (none)
+ Candidate: 3.39.4-1
+ Version table:
+ 3.39.4-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=sqlite3.policy-fetched;
+ sqlite3:
+ Installed: (none)
+ Candidate: 3.40.1-1
+ Version table:
+ 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=sqlite3.policy-installed;
+ sqlite3:
+ Installed: 3.40.1-1
+ Candidate: 3.40.1-1
+ Version table:
+ *** 3.40.1-1 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* sqlite3 --install sqlite3 <<EOI 2>>EOE >>EOO
+ apt-cache-policy: sqlite3 sqlite3.policy
+ apt-cache-policy-fetched: sqlite3 sqlite3.policy-fetched
+ apt-cache-policy-installed: sqlite3 sqlite3.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet sqlite3 <sqlite3.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet sqlite3 <sqlite3.policy-fetched
+ sudo apt-get install --quiet --assume-no sqlite3
+ LC_ALL=C apt-cache policy --quiet sqlite3 <sqlite3.policy-installed
+ EOE
+ sqlite3 3.40.1 (sqlite3 3.40.1-1) not installed
+ EOO
+ }
+
+ : libssl
+ :
+ {
+ +cat <<EOI >=libcrypto.manifest
+ : 1
+ name: libcrypto
+ version: 1.1.1+18
+ upstream-version: 1.1.1n
+ debian-name: libssl1.1 libssl-dev
+ debian-to-downstream-version: /1\.1\.1[a-z]/1.1.1/
+ summary: OpenSSL libcrypto
+ license: OpenSSL
+ EOI
+ +cat <<EOI >=libssl.manifest
+ : 1
+ name: libssl
+ version: 1.1.1+18
+ upstream-version: 1.1.1n
+ debian-name: libssl1.1 libssl-dev
+ debian-to-downstream-version: /1\.1\.1[a-z]/1.1.1/
+ summary: OpenSSL libssl
+ license: OpenSSL
+ EOI
+
+ : installed
+ :
+ ln -s ../libcrypto.manifest ./;
+ ln -s ../libssl.manifest ./;
+ cat <<EOI >=libssl1.1+libssl-dev.policy;
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ libssl-dev:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ EOI
+ cat <<EOI >=libssl1.1.policy-installed;
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libcrypto libssl --install libcrypto libssl <<EOI 2>>EOE >>EOO
+ manifest: libcrypto libcrypto.manifest
+ manifest: libssl libssl.manifest
+
+ apt-cache-policy: libssl1.1 libssl-dev libssl1.1+libssl-dev.policy
+ apt-cache-policy-installed: libssl1.1 libssl1.1.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ sudo apt-get install --quiet --assume-no libssl1.1=1.1.1n-0+deb11u3 libssl-dev=1.1.1n-0+deb11u3
+ LC_ALL=C apt-cache policy --quiet libssl1.1 <libssl1.1.policy-installed
+ EOE
+ libcrypto 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) installed
+ libssl 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) installed
+ EOO
+
+ : part-installed
+ :
+ ln -s ../libcrypto.manifest ./;
+ ln -s ../libssl.manifest ./;
+ cat <<EOI >=libssl1.1+libssl-dev.policy;
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ libssl-dev:
+ Installed: (none)
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ 1.1.1n-0+deb11u3 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libssl1.1.policy-installed;
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libcrypto libssl --install libcrypto libssl <<EOI 2>>EOE >>EOO
+ manifest: libcrypto libcrypto.manifest
+ manifest: libssl libssl.manifest
+
+ apt-cache-policy: libssl1.1 libssl-dev libssl1.1+libssl-dev.policy
+ apt-cache-policy-installed: libssl1.1 libssl1.1.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ sudo apt-get install --quiet --assume-no libssl1.1 libssl-dev
+ LC_ALL=C apt-cache policy --quiet libssl1.1 <libssl1.1.policy-installed
+ EOE
+ libcrypto 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) part installed
+ libssl 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) part installed
+ EOO
+
+ : not-installed
+ :
+ ln -s ../libcrypto.manifest ./;
+ ln -s ../libssl.manifest ./;
+ cat <<EOI >=libssl1.1+libssl-dev.policy;
+ libssl1.1:
+ Installed: (none)
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ libssl-dev:
+ Installed: (none)
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ 1.1.1n-0+deb11u3 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libssl1.1.policy-installed;
+ libssl1.1:
+ Installed: 1.1.1n-0+deb11u3
+ Candidate: 1.1.1n-0+deb11u3
+ Version table:
+ *** 1.1.1n-0+deb11u3 100
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libcrypto libssl --install libcrypto libssl <<EOI 2>>EOE >>EOO
+ manifest: libcrypto libcrypto.manifest
+ manifest: libssl libssl.manifest
+
+ apt-cache-policy: libssl1.1 libssl-dev libssl1.1+libssl-dev.policy
+ apt-cache-policy-installed: libssl1.1 libssl1.1.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libssl1.1 libssl-dev <libssl1.1+libssl-dev.policy
+ sudo apt-get install --quiet --assume-no libssl1.1 libssl-dev
+ LC_ALL=C apt-cache policy --quiet libssl1.1 <libssl1.1.policy-installed
+ EOE
+ libcrypto 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) not installed
+ libssl 1.1.1 (libssl1.1 1.1.1n-0+deb11u3) not installed
+ EOO
+ }
+
+ : libcurl
+ :
+ {
+ # Note that libcurl3-gnutls libcurl4-gnutls-dev is not a mistake.
+ #
+ # Note also that there is a third flavor, libcurl3-nss libcurl4-nss-dev,
+ # but we omit it to keep the tests manageable.
+ #
+ #
+ +cat <<EOI >=libcurl.manifest
+ : 1
+ name: libcurl
+ version: 7.84.0
+ debian-name: libcurl4 libcurl4-openssl-dev libcurl4-doc
+ debian-name: libcurl3-gnutls libcurl4-gnutls-dev libcurl4-doc
+ summary: C library for transferring data with URLs
+ license: curl
+ EOI
+
+
+ : one-full-installed
+ :
+ ln -s ../libcurl.manifest ./;
+ cat <<EOI >=libcurl4+libcurl4-openssl-dev.policy;
+ libcurl4:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ libcurl4-openssl-dev:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ EOI
+ cat <<EOI >=libcurl3-gnutls+libcurl4-gnutls-dev.policy;
+ libcurl3-gnutls:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ libcurl4-gnutls-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libcurl4.policy-installed;
+ libcurl4:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libcurl --install libcurl <<EOI 2>>EOE >>EOO
+ manifest: libcurl libcurl.manifest
+
+ apt-cache-policy: libcurl4 libcurl4-openssl-dev libcurl4+libcurl4-openssl-dev.policy
+ apt-cache-policy: libcurl3-gnutls libcurl4-gnutls-dev libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ apt-cache-policy-installed: libcurl4 libcurl4.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ sudo apt-get install --quiet --assume-no libcurl4=7.85.0-1 libcurl4-openssl-dev=7.85.0-1
+ LC_ALL=C apt-cache policy --quiet libcurl4 <libcurl4.policy-installed
+ EOE
+ libcurl 7.85.0 (libcurl4 7.85.0-1) installed
+ EOO
+
+
+ : one-part-installed
+ :
+ ln -s ../libcurl.manifest ./;
+ cat <<EOI >=libcurl4+libcurl4-openssl-dev.policy;
+ libcurl4:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ libcurl4-openssl-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libcurl3-gnutls+libcurl4-gnutls-dev.policy;
+ libcurl3-gnutls:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ libcurl4-gnutls-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libcurl4.policy-installed;
+ libcurl4:
+ Installed: 7.87.0-2
+ Candidate: 7.87.0-2
+ Version table:
+ *** 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ 100 /var/lib/dpkg/status
+ EOI
+ $* libcurl --install libcurl <<EOI 2>>EOE >>EOO
+ manifest: libcurl libcurl.manifest
+
+ apt-cache-policy: libcurl4 libcurl4-openssl-dev libcurl4+libcurl4-openssl-dev.policy
+ apt-cache-policy: libcurl3-gnutls libcurl4-gnutls-dev libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ apt-cache-policy-installed: libcurl4 libcurl4.policy-installed
+ EOI
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ sudo apt-get install --quiet --assume-no libcurl4 libcurl4-openssl-dev
+ LC_ALL=C apt-cache policy --quiet libcurl4 <libcurl4.policy-installed
+ EOE
+ libcurl 7.87.0 (libcurl4 7.87.0-2) part installed
+ EOO
+
+
+ : none-installed
+ :
+ ln -s ../libcurl.manifest ./;
+ cat <<EOI >=libcurl4+libcurl4-openssl-dev.policy;
+ libcurl4:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ libcurl4-openssl-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libcurl3-gnutls+libcurl4-gnutls-dev.policy;
+ libcurl3-gnutls:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ libcurl4-gnutls-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ $* libcurl --install libcurl <<EOI 2>>EOE != 0
+ manifest: libcurl libcurl.manifest
+
+ apt-cache-policy: libcurl4 libcurl4-openssl-dev libcurl4+libcurl4-openssl-dev.policy
+ apt-cache-policy: libcurl3-gnutls libcurl4-gnutls-dev libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ error: multiple available debian packages for libcurl
+ info: candidate: libcurl4 7.87.0-2
+ info: candidate: libcurl3-gnutls 7.87.0-2
+ info: consider installing the desired package manually and retrying the bpkg command
+ EOE
+
+
+ : both-part-installed
+ :
+ ln -s ../libcurl.manifest ./;
+ cat <<EOI >=libcurl4+libcurl4-openssl-dev.policy;
+ libcurl4:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ libcurl4-openssl-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ cat <<EOI >=libcurl3-gnutls+libcurl4-gnutls-dev.policy;
+ libcurl3-gnutls:
+ Installed: 7.85.0-1
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ *** 7.85.0-1 100
+ 100 /var/lib/dpkg/status
+ libcurl4-gnutls-dev:
+ Installed: (none)
+ Candidate: 7.87.0-2
+ Version table:
+ 7.87.0-2 500
+ 500 http://deb.debian.org/debian bookworm/main amd64 Packages
+ EOI
+ $* libcurl --install libcurl <<EOI 2>>EOE != 0
+ manifest: libcurl libcurl.manifest
+
+ apt-cache-policy: libcurl4 libcurl4-openssl-dev libcurl4+libcurl4-openssl-dev.policy
+ apt-cache-policy: libcurl3-gnutls libcurl4-gnutls-dev libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ EOI
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ sudo apt-get update --quiet --assume-no
+ LC_ALL=C apt-cache policy --quiet libcurl4 libcurl4-openssl-dev <libcurl4+libcurl4-openssl-dev.policy
+ LC_ALL=C apt-cache policy --quiet libcurl3-gnutls libcurl4-gnutls-dev <libcurl3-gnutls+libcurl4-gnutls-dev.policy
+ error: multiple partially installed debian packages for libcurl
+ info: candidate: libcurl4 7.87.0-2, missing components: libcurl4-openssl-dev
+ info: candidate: libcurl3-gnutls 7.87.0-2, missing components: libcurl4-gnutls-dev
+ info: consider fully installing the desired package manually and retrying the bpkg command
+ EOE
+ }
+}
diff --git a/bpkg/system-package-manager-fedora.cxx b/bpkg/system-package-manager-fedora.cxx
new file mode 100644
index 0000000..3b79c50
--- /dev/null
+++ b/bpkg/system-package-manager-fedora.cxx
@@ -0,0 +1,4560 @@
+// file : bpkg/system-package-manager-fedora.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager-fedora.hxx>
+
+#include <locale>
+
+#include <bpkg/diagnostics.hxx>
+
+#include <bpkg/pkg-bindist-options.hxx>
+
+using namespace butl;
+
+namespace bpkg
+{
+ using package_status = system_package_status_fedora;
+
+ // Translate host CPU to Fedora package architecture.
+ //
+ string system_package_manager_fedora::
+ arch_from_target (const target_triplet& h)
+ {
+ const string& c (h.cpu);
+ return
+ c == "i386" || c == "i486" || c == "i586" || c == "i686" ? "i686" :
+ c;
+ }
+
+ // Parse the fedora-name (or alike) value. The first argument is the package
+ // type.
+ //
+ // Note that for now we treat all the packages from the non-main groups as
+ // extras omitting the -common package (assuming it's pulled by the main
+ // package) as well as -doc and -debug* unless requested with the
+ // extra_{doc,debug*} arguments. Note that we treat -static as -devel (since
+ // we can't know whether the static library is needed or not).
+ //
+ package_status system_package_manager_fedora::
+ parse_name_value (const string& pt,
+ const string& nv,
+ bool extra_doc,
+ bool extra_debuginfo,
+ bool extra_debugsource)
+ {
+ auto split = [] (const string& s, char d) -> strings
+ {
+ strings r;
+ for (size_t b (0), e (0); next_word (s, b, e, d); )
+ r.push_back (string (s, b, e - b));
+ return r;
+ };
+
+ auto suffix = [] (const string& n, const string& s) -> bool
+ {
+ size_t nn (n.size ());
+ size_t sn (s.size ());
+ return nn > sn && n.compare (nn - sn, sn, s) == 0;
+ };
+
+ auto parse_group = [&split, &suffix] (const string& g, const string* pt)
+ {
+ strings ns (split (g, ' '));
+
+ if (ns.empty ())
+ fail << "empty package group";
+
+ package_status r;
+
+ // Handle the "devel instead of main" special case for libraries.
+ //
+ // Check that the following name does not end with -devel. This will be
+ // the only way to disambiguate the case where the library name happens
+ // to end with -devel (e.g., libfoo-devel libfoo-devel-devel).
+ //
+ {
+ string& m (ns[0]);
+
+ if (pt != nullptr &&
+ *pt == "lib" &&
+ suffix (m, "-devel") &&
+ !(ns.size () > 1 && suffix (ns[1], "-devel")))
+ {
+ r = package_status ("", move (m));
+ }
+ else
+ r = package_status (move (m));
+ }
+
+ // Handle the rest.
+ //
+ for (size_t i (1); i != ns.size (); ++i)
+ {
+ string& n (ns[i]);
+
+ const char* w;
+ if (string* v = (suffix (n, (w = "-devel")) ? &r.devel :
+ suffix (n, (w = "-static")) ? &r.static_ :
+ suffix (n, (w = "-doc")) ? &r.doc :
+ suffix (n, (w = "-debuginfo")) ? &r.debuginfo :
+ suffix (n, (w = "-debugsource")) ? &r.debugsource :
+ suffix (n, (w = "-common")) ? &r.common :
+ nullptr))
+ {
+ if (!v->empty ())
+ fail << "multiple " << w << " package names in '" << g << "'" <<
+ info << "did you forget to separate package groups with comma?";
+
+ *v = move (n);
+ }
+ else
+ r.extras.push_back (move (n));
+ }
+
+ return r;
+ };
+
+ strings gs (split (nv, ','));
+ assert (!gs.empty ()); // *-name value cannot be empty.
+
+ package_status r;
+ for (size_t i (0); i != gs.size (); ++i)
+ {
+ if (i == 0) // Main group.
+ r = parse_group (gs[i], &pt);
+ else
+ {
+ package_status g (parse_group (gs[i], nullptr));
+
+ if (!g.main.empty ()) r.extras.push_back (move (g.main));
+ if (!g.devel.empty ()) r.extras.push_back (move (g.devel));
+ if (!g.static_.empty ()) r.extras.push_back (move (g.static_));
+ if (!g.doc.empty () && extra_doc) r.extras.push_back (move (g.doc));
+
+ if (!g.debuginfo.empty () && extra_debuginfo)
+ r.extras.push_back (move (g.debuginfo));
+
+ if (!g.debugsource.empty () && extra_debugsource)
+ r.extras.push_back (move (g.debugsource));
+
+ if (!g.common.empty () && false) r.extras.push_back (move (g.common));
+ if (!g.extras.empty ()) r.extras.insert (
+ r.extras.end (),
+ make_move_iterator (g.extras.begin ()),
+ make_move_iterator (g.extras.end ()));
+ }
+ }
+
+ return r;
+ }
+
+ // Attempt to determine the main package name from its -devel package based
+ // on the extracted (by dnf_repoquery_requires()) dependencies, passed as a
+ // list of the package name/version pairs. Return empty string if unable to.
+ //
+ string system_package_manager_fedora::
+ main_from_devel (const string& devel_name,
+ const string& devel_ver,
+ const vector<pair<string, string>>& depends)
+ {
+ // For the main package we first look for a dependency with the
+ // <devel-stem>-libs name and the devel_ver version. Failed that, we try
+ // just <devel-stem>.
+ //
+ // Note that the order is important since for a mixed package we need to
+ // end up with the -libs sub-package rather than with the base package as,
+ // for example, in the following case:
+ //
+ // sqlite-devel 3.36.0-3.fc35 ->
+ // sqlite 3.36.0-3.fc35
+ // sqlite-libs 3.36.0-3.fc35
+ //
+ string devel_stem (devel_name, 0, devel_name.rfind ("-devel"));
+
+ auto find = [&devel_ver, &depends] (const string& n)
+ {
+ auto i (find_if (depends.begin (), depends.end (),
+ [&n, &devel_ver] (const pair<string, string>& d)
+ {
+ return d.first == n && d.second == devel_ver;
+ }));
+
+ return i != depends.end () ? i->first : string ();
+ };
+
+ string r (find (devel_stem + "-libs"));
+ return !r.empty () ? r : find (devel_stem);
+ }
+
+ static process_path dnf_path;
+ static process_path sudo_path;
+
+ // Obtain the installed and candidate versions for the specified list of
+ // Fedora packages by executing `dnf list`.
+ //
+ // If the n argument is not 0, then only query the first n packages.
+ //
+ void system_package_manager_fedora::
+ dnf_list (vector<package_info>& pis, size_t n)
+ {
+ if (n == 0)
+ n = pis.size ();
+
+ assert (n != 0 && n <= pis.size ());
+
+ // The --quiet option makes sure we don't get 'Last metadata expiration
+ // check: <timestamp>' printed to stderr. It does not appear to affect
+ // error diagnostics (try specifying a single unknown package).
+ //
+ cstrings args {
+ "dnf", "list",
+ "--all", // Look for both installed and available.
+ "--cacheonly", // Don't automatically update the metadata.
+ "--quiet"};
+
+ for (size_t i (0); i != n; ++i)
+ {
+ package_info& pi (pis[i]);
+
+ string& n (pi.name);
+ assert (!n.empty ());
+
+ pi.installed_version.clear ();
+ pi.candidate_version.clear ();
+
+ pi.installed_arch.clear ();
+ pi.candidate_arch.clear ();
+
+ args.push_back (n.c_str ());
+ }
+
+ // Note that `dnf list` fails if there are no matching packages to print.
+ // Thus, let's hack around this by adding the rpm package to the list, so
+ // that at least one package is always present and the command can never
+ // fail for that reason.
+ //
+ // Also note that we still allow the rpm package to appear in the
+ // specified package list.
+ //
+ bool rpm (false);
+ args.push_back ("rpm");
+
+ args.push_back (nullptr);
+
+ // Run with the C locale to make sure there is no localization.
+ //
+ const char* evars[] = {"LC_ALL=C", nullptr};
+
+ try
+ {
+ if (dnf_path.empty () && !simulate_)
+ dnf_path = process::path_search (args[0], false /* init */);
+
+ process_env pe (dnf_path, evars);
+
+ if (verb >= 3)
+ print_process (pe, args);
+
+ // Redirect stdout to a pipe. For good measure also redirect stdin to
+ // /dev/null to make sure there are no prompts of any kind.
+ //
+ process pr;
+ if (!simulate_)
+ pr = process (dnf_path,
+ args,
+ -2 /* stdin */,
+ -1 /* stdout */,
+ 2 /* stderr */,
+ nullptr /* cwd */,
+ evars);
+ else
+ {
+ strings k;
+ for (size_t i (0); i != n; ++i)
+ k.push_back (pis[i].name);
+
+ const path* f (nullptr);
+ if (installed_)
+ {
+ auto i (simulate_->dnf_list_installed_.find (k));
+ if (i != simulate_->dnf_list_installed_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr && fetched_)
+ {
+ auto i (simulate_->dnf_list_fetched_.find (k));
+ if (i != simulate_->dnf_list_fetched_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr)
+ {
+ auto i (simulate_->dnf_list_.find (k));
+ if (i != simulate_->dnf_list_.end ())
+ f = &i->second;
+ }
+
+ diag_record dr (text);
+ print_process (dr, pe, args);
+ dr << " <" << (f == nullptr || f->empty () ? "/dev/null" : f->string ());
+
+ pr = process (process_exit (0));
+ pr.in_ofd = f == nullptr || f->empty ()
+ ? fdopen_null ()
+ : (f->string () == "-"
+ ? fddup (stdin_fd ())
+ : fdopen (*f, fdopen_mode::in));
+ }
+
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+
+ // The output of `dnf list <pkg1> <pkg2> ...` is the 2 groups of lines
+ // in the following form:
+ //
+ // Installed Packages
+ // <pkg1>.<arch1> 13.0.0-3.fc35 @<repo1>
+ // <pkg2>.<arch2> 69.1-6.fc35 @<repo2>
+ // Available Packages
+ // <pkg1>.<arch1> 13.0.1-1.fc35 <repo1>
+ // <pkg3>.<arch3> 1.2.11-32.fc35 <repo3>
+ //
+ // Where unknown packages are omitted. The lines order does not
+ // necessarily match the order of the packages on the command line.
+ // It looks like there should be no blank lines, but who really knows.
+ //
+ // Note also that if a package appears in the 'Installed Packages'
+ // group, then it only appears in the 'Available Packages' if the
+ // candidate version is better. Only the single (best) available
+ // version is listed, which we call the candidate version.
+ //
+ {
+ auto df = make_diag_frame (
+ [&pe, &args] (diag_record& dr)
+ {
+ dr << info << "while parsing output of ";
+ print_process (dr, pe, args);
+ });
+
+ // Keep track of whether we are inside of the 'Installed Packages'
+ // or 'Available Packages' sections.
+ //
+ optional<bool> installed;
+
+ for (string l; !eof (getline (is, l)); )
+ {
+ if (l == "Installed Packages")
+ {
+ if (installed)
+ fail << "unexpected line '" << l << "'";
+
+ installed = true;
+ continue;
+ }
+
+ if (l == "Available Packages")
+ {
+ if (installed && !*installed)
+ fail << "duplicate line '" << l << "'";
+
+ installed = false;
+ continue;
+ }
+
+ if (!installed)
+ fail << "unexpected line '" << l << "'";
+
+ // Parse the package name.
+ //
+ size_t e (l.find (' '));
+
+ if (l.empty () || e == 0)
+ fail << "expected package name in '" << l << "'";
+
+ if (e == string::npos)
+ fail << "expected package version in '" << l << "'";
+
+ string p (l, 0, e);
+
+ // Parse the package version.
+ //
+ size_t b (l.find_first_not_of (' ', e + 1));
+
+ if (b == string::npos)
+ fail << "expected package version in '" << l << "'";
+
+ // It does not seem that the repository id can be absent. Even
+ // if the package is installed manually it is assumed to come from
+ // some special repository (@commandline, etc). For example:
+ //
+ // # dnf install ./libsigc++30-3.0.7-2.fc35.x86_64.rpm
+ // # rpm -i ./libsigc++30-devel-3.0.7-2.fc35.x86_64.rpm
+ // # dnf list --quiet libsigc++30.x86_64 libsigc++30-devel.x86_64
+ // Installed Packages
+ // libsigc++30.x86_64 3.0.7-2.fc35 @@commandline
+ // libsigc++30-devel.x86_64 3.0.7-2.fc35 @@System
+ //
+ // Thus, we assume that the version is always followed with the
+ // space character.
+ //
+ e = l.find (' ', b + 1);
+
+ if (e == string::npos)
+ fail << "expected package repository in '" << l << "'";
+
+ string v (l, b, e - b);
+
+ // While we don't really care about the rest of the line, let's
+ // verify that it contains a repository id, for good measure.
+ //
+ b = l.find_first_not_of (' ', e + 1);
+
+ if (b == string::npos)
+ fail << "expected package repository in '" << l << "'";
+
+ // Separate the architecture from the package name.
+ //
+ e = p.rfind ('.');
+
+ if (e == string::npos || e == 0 || e == p.size () - 1)
+ fail << "can't extract architecture for package " << p
+ << " in '" << l << "'";
+
+ string a (p, e + 1);
+
+ // Skip the package if its architecture differs from the host
+ // architecture.
+ //
+ if (a != arch && a != "noarch")
+ continue;
+
+ p.resize (e);
+
+ if (p == "rpm")
+ rpm = true;
+
+ // Find the package info to update.
+ //
+ auto i (find_if (pis.begin (), pis.end (),
+ [&p] (const package_info& pi)
+ {return pi.name == p;}));
+
+ if (i == pis.end ())
+ {
+ // Skip the special rpm package which may not be present in the
+ // list.
+ //
+ if (p == "rpm")
+ continue;
+
+ fail << "unexpected package " << p << '.' << a << ' ' << v
+ << " in '" << l << "'";
+ }
+
+ string& ver (*installed
+ ? i->installed_version
+ : i->candidate_version);
+
+ if (!ver.empty ())
+ fail << "multiple " << (*installed ? "installed " : "available ")
+ << "versions of package " << p << '.' << a <<
+ info << "version: " << ver <<
+ info << "version: " << v;
+
+ ver = move (v);
+
+ (*installed ? i->installed_arch : i->candidate_arch) = move (a);
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ fail << "unable to read " << args[0] << " list output: " << e;
+
+ // Fall through.
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << args[0] << " list exited with non-zero code";
+
+ if (verb < 3)
+ {
+ dr << info << "command line: ";
+ print_process (dr, pe, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ if (!rpm)
+ fail << "rpm package doesn't exist";
+
+ // Note that if a Fedora package is installed but the repository doesn't
+ // contain a better version, then this package won't appear in the
+ // 'Available Packages' section of the `dnf list` output and thus the
+ // candidate_version will stay empty. Let's set it to the installed
+ // version in this case to be consistent with the Debian's semantics and
+ // keep the Fedora and Debian system package manager implementations
+ // aligned.
+ //
+ for (size_t i (0); i != n; ++i)
+ {
+ package_info& pi (pis[i]);
+
+ if (pi.candidate_version.empty () && !pi.installed_version.empty ())
+ {
+ pi.candidate_version = pi.installed_version;
+ pi.candidate_arch = pi.installed_arch;
+ }
+ }
+ }
+
+ // Execute `dnf repoquery --requires` for the specified
+ // package/version/architecture and return its dependencies as a list of the
+ // name/version pairs.
+ //
+ // It is expected that the specified package/version/architecture is known
+ // (e.g., returned by the `dnf list` command). Note that if that's not the
+ // case (can happen due to a race), then an empty list is returned. This,
+ // however, is ok for our current usage since in this case we will shortly
+ // fail with the 'unable to guess main package' error anyway.
+ //
+ // Note that the returned dependencies are always of the host architecture
+ // or noarch. For example:
+ //
+ // dhcp-client-12:4.4.3-4.P1.fc35.x86_64 ->
+ // dhcp-common-12:4.4.3-4.P1.fc35.noarch
+ // coreutils-8.32-36.fc35.x86_64
+ // ...
+ //
+ // rust-uuid+std-devel-1.2.1-1.fc35.noarch ->
+ // rust-uuid-devel-1.2.1-1.fc35.noarch
+ // cargo-1.65.0-1.fc35.x86_64
+ //
+ vector<pair<string, string>> system_package_manager_fedora::
+ dnf_repoquery_requires (const string& name,
+ const string& ver,
+ const string& qarch,
+ bool installed)
+ {
+ assert (!name.empty () && !ver.empty () && !arch.empty ());
+
+ // Qualify the package with the architecture suffix.
+ //
+ // Note that for reasons unknown, the below command may still print some
+ // dependencies with different architecture (see the below example). It
+ // feels sensible to just skip them.
+ //
+ string spec (name + '-' + ver + '.' + qarch);
+
+ // The --quiet option makes sure we don't get 'Last metadata expiration
+ // check: <timestamp>' printed to stderr. It does not appear to affect
+ // error diagnostics (try specifying an unknown option).
+ //
+ cstrings args {
+ "dnf", "repoquery", "--requires",
+ "--quiet",
+ "--cacheonly", // Don't automatically update the metadata.
+ "--resolve", // Resolve requirements to packages/versions.
+ "--qf", "%{name} %{arch} %{epoch}:%{version}-%{release}"};
+
+ // Note that installed packages which are not available from configured
+ // repositories (e.g. packages installed from local rpm files or temporary
+ // local repositories, package versions not available anymore from their
+ // original repositories, etc) are not seen by `dnf repoquery` by
+ // default. It also turned out that the --installed option not only limits
+ // the resulting set to the installed packages, but also makes `dnf
+ // repoquery` to see all the installed packages, including the unavailable
+ // ones. Thus, we always add this option to query dependencies of the
+ // installed packages.
+ //
+ if (installed)
+ {
+ args.push_back ("--installed");
+
+ // dnf(8) also recommends to use --disableexcludes together with
+ // --install to make sure that all installed packages will be listed and
+ // no configuration file may influence the result.
+ //
+ args.push_back ("--disableexcludes=all");
+ }
+
+ args.push_back (spec.c_str ());
+ args.push_back (nullptr);
+
+ // Note that for this command there seems to be no need to run with the C
+ // locale since the output is presumably not localizable. But let's do it
+ // for good measure.
+ //
+ const char* evars[] = {"LC_ALL=C", nullptr};
+
+ vector<pair<string, string>> r;
+ try
+ {
+ if (dnf_path.empty () && !simulate_)
+ dnf_path = process::path_search (args[0], false /* init */);
+
+ process_env pe (dnf_path, evars);
+
+ if (verb >= 3)
+ print_process (pe, args);
+
+ // Redirect stdout to a pipe. For good measure also redirect stdin to
+ // /dev/null to make sure there are no prompts of any kind.
+ //
+ process pr;
+ if (!simulate_)
+ pr = process (dnf_path,
+ args,
+ -2 /* stdin */,
+ -1 /* stdout */,
+ 2 /* stderr */,
+ nullptr /* cwd */,
+ evars);
+ else
+ {
+ simulation::package k {name, ver, qarch, installed};
+
+ const path* f (nullptr);
+ if (fetched_)
+ {
+ auto i (simulate_->dnf_repoquery_requires_fetched_.find (k));
+ if (i != simulate_->dnf_repoquery_requires_fetched_.end ())
+ f = &i->second;
+ }
+ if (f == nullptr)
+ {
+ auto i (simulate_->dnf_repoquery_requires_.find (k));
+ if (i != simulate_->dnf_repoquery_requires_.end ())
+ f = &i->second;
+ }
+
+ diag_record dr (text);
+ print_process (dr, pe, args);
+ dr << " <" << (f == nullptr || f->empty () ? "/dev/null" : f->string ());
+
+ pr = process (process_exit (0));
+ pr.in_ofd = f == nullptr || f->empty ()
+ ? fdopen_null ()
+ : (f->string () == "-"
+ ? fddup (stdin_fd ())
+ : fdopen (*f, fdopen_mode::in));
+ }
+
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+
+ // The output of the command will be the sequence of the package lines
+ // in the `<name> <arc> <version>` form (per the -qf option above). So
+ // for example for the libicu-devel-69.1-6.fc35.x86_64 package it is
+ // as follows:
+ //
+ // bash i686 0:5.1.8-3.fc35
+ // bash x86_64 0:5.1.8-3.fc35
+ // glibc i686 0:2.34-49.fc35
+ // glibc x86_64 0:2.34-49.fc35
+ // libicu x86_64 0:69.1-6.fc35
+ // libicu-devel i686 0:69.1-6.fc35
+ // libicu-devel x86_64 0:69.1-6.fc35
+ // pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ // pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ //
+ // Note that there is also a self-dependency.
+ //
+ for (string l; !eof (getline (is, l)); )
+ {
+ // Parse the package name.
+ //
+ size_t e (l.find (' '));
+
+ if (l.empty () || e == 0)
+ fail << "expected package name in '" << l << "'";
+
+ if (e == string::npos)
+ fail << "expected package architecture in '" << l << "'";
+
+ string p (l, 0, e);
+
+ // Parse the package architecture.
+ //
+ size_t b (e + 1);
+ e = l.find (' ', b);
+
+ if (e == string::npos)
+ fail << "expected package version in '" << l << "'";
+
+ string a (l, b, e - b);
+ if (a.empty ())
+ fail << "expected package architecture in '" << l << "'";
+
+ // Parse the package version.
+ //
+ string v (l, e + 1);
+
+ // Strip the '0:' epoch from the package version to align with
+ // versions retrieved by other functions (dnf_list(), etc).
+ //
+ e = v.find (':');
+ if (e == string::npos || e == 0)
+ fail << "no epoch for package version in '" << l << "'";
+
+ if (e == 1 && v[0] == '0')
+ v.erase (0, 2);
+
+ // Skip a potential self-dependency and dependencies of a different
+ // architecture.
+ //
+ if (p == name || (a != arch && a != "noarch"))
+ continue;
+
+ r.emplace_back (move (p), move (v));
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ fail << "unable to read " << args[0] << " repoquery --requires "
+ << "output: " << e;
+
+ // Fall through.
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << args[0] << " repoquery --requires exited with non-zero code";
+
+ if (verb < 3)
+ {
+ dr << info << "command line: ";
+ print_process (dr, pe, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ return r;
+ }
+
+ // Prepare the common options for commands which update the system.
+ //
+ pair<cstrings, const process_path&> system_package_manager_fedora::
+ dnf_common (const char* command,
+ optional<size_t> fetch_timeout,
+ strings& args_storage)
+ {
+ // Pre-allocate the required number of entries in the arguments storage.
+ //
+ if (fetch_timeout)
+ args_storage.reserve (1);
+
+ cstrings args;
+
+ if (!sudo_.empty ())
+ args.push_back (sudo_.c_str ());
+
+ args.push_back ("dnf");
+ args.push_back (command);
+
+ // Map our verbosity/progress to dnf --quiet and --verbose options.
+ //
+ // Note that all the diagnostics, including the progress indication and
+ // general information (like what's being installed) but excluding error
+ // messages, is printed to stdout. So we fix this by redirecting stdout to
+ // stderr. By default the progress bar for network transfers is printed,
+ // unless stdout is not a terminal. The --quiet option disables printing
+ // the plan and all the progress indication, but not the confirmation
+ // prompt nor error messages.
+ //
+ if (progress_ && *progress_)
+ {
+ // Print the progress bar by default, unless this is not a terminal
+ // (there is no way to force it).
+ }
+ else if (verb == 0 || (progress_ && !*progress_))
+ {
+ args.push_back ("--quiet");
+ }
+
+ if (yes_)
+ {
+ args.push_back ("--assumeyes");
+ }
+ else if (!stderr_term)
+ {
+ // Suppress any prompts if stderr is not a terminal for good measure.
+ //
+ args.push_back ("--assumeno");
+ }
+
+ // Add the network operations timeout configuration options, if requested.
+ //
+ if (fetch_timeout)
+ {
+ args_storage.push_back (
+ "--setopt=timeout=" + to_string (*fetch_timeout));
+
+ args.push_back (args_storage.back ().c_str ());
+ args.push_back ("--setopt=minrate=0");
+ }
+
+ try
+ {
+ const process_path* pp (nullptr);
+
+ if (!sudo_.empty ())
+ {
+ if (sudo_path.empty () && !simulate_)
+ sudo_path = process::path_search (args[0], false /* init */);
+
+ pp = &sudo_path;
+ }
+ else
+ {
+ if (dnf_path.empty () && !simulate_)
+ dnf_path = process::path_search (args[0], false /* init */);
+
+ pp = &dnf_path;
+ }
+
+ return pair<cstrings, const process_path&> (move (args), *pp);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute " << args[0] << ": " << e << endf;
+ }
+ }
+
+ // Execute `dnf makecache` to download and cache the repositories metadata.
+ //
+ void system_package_manager_fedora::
+ dnf_makecache ()
+ {
+ strings args_storage;
+ pair<cstrings, const process_path&> args_pp (
+ dnf_common ("makecache", fetch_timeout_, args_storage));
+
+ cstrings& args (args_pp.first);
+ const process_path& pp (args_pp.second);
+
+ args.push_back ("--refresh");
+ args.push_back (nullptr);
+
+ try
+ {
+ if (verb >= 2)
+ print_process (args);
+ else if (verb == 1)
+ text << "updating " << os_release.name_id << " repositories metadata...";
+
+ process pr;
+ if (!simulate_)
+ {
+ // Redirect stdout to stderr.
+ //
+ pr = process (pp, args, 0 /* stdin */, 2 /* stdout */);
+ }
+ else
+ {
+ print_process (args);
+ pr = process (process_exit (simulate_->dnf_makecache_fail_ ? 1 : 0));
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << "dnf makecache exited with non-zero code";
+
+ if (verb < 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+
+ if (verb == 1)
+ text << "updated " << os_release.name_id << " repositories metadata";
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ // Execute `dnf install` to install the specified packages (e.g.,
+ // libfoo.x86_64 or libfoo-1.2.3-1.fc35.x86_64).
+ //
+ // Note that the package name can only contain alpha-numeric characters,
+ // '-', '.', '_', and '+' (see Guidelines for Naming Fedora Packages for
+ // details). If specified, both the version (1.2.3) and release (1.fc35)
+ // parts are mandatory and may only contain alpha-numeric characters, `.`,
+ // `_`, `+`, `~`, and `^` (see the RPM spec file format documentation for
+ // details). Thus, package specs (which are actually wildcards) are
+ // generally ambiguous, so that libfoo-1.2.3-1.fc35.x86_64 may theoretically
+ // be a package name and libfoo-bar a specific package version.
+ //
+ // By default, `dnf install` tries to interpret the spec as the
+ // <name>-[<epoch>:]<version>-<release>.<arch> form prior to trying the
+ // <name>.<arch> form until any matched packages are found (see SPECIFYING
+ // PACKAGES section of dnf(8) for more details on the spec matching
+ // rules). We could potentially use `dnf install-nevra` command for the
+ // package version specs and `dnf install-na` for the package name specs.
+ // Let's, however, keep it simple for now given that clashes for our
+ // use-case are presumably not very likely.
+ //
+ void system_package_manager_fedora::
+ dnf_install (const strings& pkgs)
+ {
+ assert (!pkgs.empty ());
+
+ strings args_storage;
+ pair<cstrings, const process_path&> args_pp (
+ dnf_common ("install", fetch_timeout_, args_storage));
+
+ cstrings& args (args_pp.first);
+ const process_path& pp (args_pp.second);
+
+ // Note that we can't use --cacheonly here to prevent the metadata update,
+ // since the install command then expects the package RPM files to also be
+ // cached and fails if that's not the case. Thus we have to override the
+ // metadata_expire=never configuration option instead. Which makes the
+ // whole thing quite hairy and of dubious value -- there is nothing wrong
+ // with letting it re-fetch the metadata during install (which in fact may
+ // save us from attempting to download no longer existing packages).
+ //
+#if 0
+ args.push_back ("--setopt=metadata_expire=never");
+#endif
+
+ for (const string& p: pkgs)
+ args.push_back (p.c_str ());
+
+ args.push_back (nullptr);
+
+ try
+ {
+ if (verb >= 2)
+ print_process (args);
+ else if (verb == 1)
+ text << "installing " << os_release.name_id << " packages...";
+
+ process pr;
+ if (!simulate_)
+ {
+ // Redirect stdout to stderr.
+ //
+ pr = process (pp, args, 0 /* stdin */, 2 /* stdout */);
+ }
+ else
+ {
+ print_process (args);
+ pr = process (process_exit (simulate_->dnf_install_fail_ ? 100 : 0));
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << "dnf install exited with non-zero code";
+
+ if (verb < 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+
+ dr << info << "consider resolving the issue manually and retrying "
+ << "the bpkg command";
+ }
+
+ if (verb == 1)
+ text << "installed " << os_release.name_id << " packages";
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ // Execute `dnf mark install` to mark the installed packages as installed by
+ // the user (see dnf_install() for details on the package specs).
+ //
+ // Note that an installed package may be marked as installed by the user
+ // rather than as a dependency. In particular, such a package will never be
+ // automatically removed as an unused dependency. This mark can be added and
+ // removed by the `dnf mark install` and `dnf mark remove` commands,
+ // respectively. Besides that, this mark is automatically added by `dnf
+ // install` for a package specified on the command line, but only if it is
+ // not yet installed. Note that this mark will not be added automatically
+ // for an already installed package even if it is upgraded explicitly. For
+ // example:
+ //
+ // $ sudo dnf install libsigc++30-devel-3.0.2-2.fc32 --repofrompath test,./repo --setopt=gpgcheck=0 --assumeyes
+ // Installed: libsigc++30-3.0.2-2.fc32.x86_64 libsigc++30-devel-3.0.2-2.fc32.x86_64
+ //
+ // $ sudo dnf install --best libsigc++30 --assumeyes
+ // Upgraded: libsigc++30-3.0.7-2.fc35.x86_64 libsigc++30-devel-3.0.7-2.fc35.x86_64
+ //
+ // $ sudo dnf remove libsigc++30-devel --assumeyes
+ // Removed: libsigc++30-3.0.7-2.fc35.x86_64 libsigc++30-devel-3.0.7-2.fc35.x86_64
+ //
+ void system_package_manager_fedora::
+ dnf_mark_install (const strings& pkgs)
+ {
+ assert (!pkgs.empty ());
+
+ strings args_storage;
+ pair<cstrings, const process_path&> args_pp (
+ dnf_common ("mark", nullopt /* fetch_timeout */, args_storage));
+
+ cstrings& args (args_pp.first);
+ const process_path& pp (args_pp.second);
+
+ args.push_back ("install");
+ args.push_back ("--cacheonly");
+
+ for (const string& p: pkgs)
+ args.push_back (p.c_str ());
+
+ args.push_back (nullptr);
+
+ try
+ {
+ if (verb >= 2)
+ print_process (args);
+
+ process pr;
+ if (!simulate_)
+ {
+ // Redirect stdout to stderr.
+ //
+ pr = process (pp, args, 0 /* stdin */, 2 /* stdout */);
+ }
+ else
+ {
+ print_process (args);
+ pr = process (process_exit (simulate_->dnf_mark_install_fail_ ? 1 : 0));
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << "dnf mark install exited with non-zero code";
+
+ if (verb < 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+
+ dr << info << "consider resolving the issue manually and retrying "
+ << "the bpkg command";
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ optional<const system_package_status*> system_package_manager_fedora::
+ status (const package_name& pn, const available_packages* aps)
+ {
+ // First check the cache.
+ //
+ {
+ auto i (status_cache_.find (pn));
+
+ if (i != status_cache_.end ())
+ return i->second ? &*i->second : nullptr;
+
+ if (aps == nullptr)
+ return nullopt;
+ }
+
+ optional<package_status> r (status (pn, *aps));
+
+ // Cache.
+ //
+ auto i (status_cache_.emplace (pn, move (r)).first);
+ return i->second ? &*i->second : nullptr;
+ }
+
+ optional<package_status> system_package_manager_fedora::
+ status (const package_name& pn, const available_packages& aps)
+ {
+ tracer trace ("system_package_manager_fedora::status");
+
+ // For now we ignore -doc and -debug* package components (but we may want
+ // to have options controlling this later). Note also that we assume
+ // -common is pulled automatically by the base package so we ignore it as
+ // well (see equivalent logic in parse_name_value()).
+ //
+ bool need_doc (false);
+ bool need_debuginfo (false);
+ bool need_debugsource (false);
+
+ vector<package_status> candidates;
+
+ // Translate our package name to the Fedora package names.
+ //
+ {
+ auto df = make_diag_frame (
+ [this, &pn] (diag_record& dr)
+ {
+ dr << info << "while mapping " << pn << " to " << os_release.name_id
+ << " package name";
+ });
+
+ // Without explicit type, the best we can do in trying to detect whether
+ // this is a library is to check for the lib prefix. Libraries without
+ // the lib prefix and non-libraries with the lib prefix (both of which
+ // we do not recommend) will have to provide a manual mapping (or
+ // explicit type).
+ //
+ // Note that using the first (latest) available package as a source of
+ // type information seems like a reasonable choice.
+ //
+ const string& pt (!aps.empty ()
+ ? aps.front ().first->effective_type ()
+ : package_manifest::effective_type (nullopt, pn));
+
+ strings ns;
+ if (!aps.empty ())
+ ns = system_package_names (aps,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids,
+ true /* native */);
+ if (ns.empty ())
+ {
+ // Attempt to automatically translate our package name. Failed that we
+ // should try to use the project name, if present, as a fallback.
+ //
+ const string& n (pn.string ());
+
+ // Note that theoretically different available packages can have
+ // different project names. But taking it from the latest version
+ // feels good enough.
+ //
+ const shared_ptr<available_package>& ap (!aps.empty ()
+ ? aps.front ().first
+ : nullptr);
+
+ string f (ap != nullptr && ap->project && *ap->project != pn
+ ? ap->project->string ()
+ : empty_string);
+
+ if (pt == "lib")
+ {
+ // If there is no project name let's try to use the package name
+ // with the lib prefix stripped as a fallback. Note that naming
+ // library packages without the lib prefix is quite common in Fedora
+ // (xerces-c, uuid-c++, etc).
+ //
+ if (f.empty ())
+ f = string (n, 3);
+
+ f += "-devel";
+
+ // Keep the base package name empty as an indication that it is to
+ // be discovered.
+ //
+ candidates.push_back (package_status ("", n + "-devel", move (f)));
+ }
+ else
+ candidates.push_back (package_status (n, "", move (f)));
+ }
+ else
+ {
+ // Parse each manual mapping.
+ //
+ for (const string& n: ns)
+ {
+ package_status s (parse_name_value (pt,
+ n,
+ need_doc,
+ need_debuginfo,
+ need_debugsource));
+
+ // Suppress duplicates for good measure based on the base package
+ // name (and falling back to -devel if empty).
+ //
+ auto i (find_if (candidates.begin (), candidates.end (),
+ [&s] (const package_status& x)
+ {
+ // Note that it's possible for one mapping to be
+ // specified as -devel only while the other as
+ // main and -devel.
+ //
+ return s.main.empty () || x.main.empty ()
+ ? s.devel == x.devel
+ : s.main == x.main;
+ }));
+ if (i == candidates.end ())
+ candidates.push_back (move (s));
+ else
+ {
+ // Should we verify the rest matches for good measure? But what
+ // if we need to override, as in:
+ //
+ // fedora_35-name: libfoo libfoo-bar-devel
+ // fedora_34-name: libfoo libfoo-devel
+ //
+ // Note that for this to work we must get fedora_35 values before
+ // fedora_34, which is the semantics guaranteed by
+ // system_package_names().
+ }
+ }
+ }
+ }
+
+ // Guess unknown main package given the -devel package, its version, and
+ // architecture. Failed that, assume the package to be a binless library
+ // and leave the main member of the package_status object empty.
+ //
+ auto guess_main = [this, &trace] (package_status& s,
+ const string& ver,
+ const string& qarch,
+ bool installed)
+ {
+ vector<pair<string, string>> depends (
+ dnf_repoquery_requires (s.devel, ver, qarch, installed));
+
+ s.main = main_from_devel (s.devel, ver, depends);
+
+ if (s.main.empty ())
+ {
+ if (verb >= 4)
+ {
+ diag_record dr (trace);
+ dr << "unable to guess main package for " << s.devel << ' ' << ver;
+
+ if (!depends.empty ())
+ {
+ dr << ", depends on";
+
+ for (auto b (depends.begin ()), i (b); i != depends.end (); ++i)
+ dr << (i == b ? " " : ", ") << i->first << ' ' << i->second;
+ }
+ else
+ dr << ", has no dependencies";
+ }
+ }
+ };
+
+ // Calculate the package status from individual package components.
+ // Return nullopt if there is a component without installed or candidate
+ // version (which means the package cannot be installed).
+ //
+ // The main argument specifies the size of the main group. Only components
+ // from this group are considered for partially_installed determination.
+ //
+ // @@ TODO: we should probably prioritize partially installed with fully
+ // installed main group. Add almost_installed next to partially_installed?
+ //
+ using status_type = package_status::status_type;
+
+ auto status = [] (const vector<package_info>& pis, size_t main)
+ -> optional<status_type>
+ {
+ bool i (false), u (false);
+
+ for (size_t j (0); j != pis.size (); ++j)
+ {
+ const package_info& pi (pis[j]);
+
+ if (pi.installed_version.empty ())
+ {
+ if (pi.candidate_version.empty ())
+ return nullopt;
+
+ u = true;
+ }
+ else if (j < main)
+ i = true;
+ }
+
+ return (!u ? package_status::installed :
+ !i ? package_status::not_installed :
+ package_status::partially_installed);
+ };
+
+ // First look for an already fully installed package.
+ //
+ optional<package_status> r;
+
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ vector<package_info>& pis (ps.package_infos);
+
+ // Query both main and fallback packages with a single dnf_list()
+ // invocation.
+ //
+ if (!ps.main.empty ()) pis.emplace_back (ps.main);
+ if (!ps.devel.empty ()) pis.emplace_back (ps.devel);
+ if (!ps.fallback.empty ()) pis.emplace_back (ps.fallback);
+ if (!ps.static_.empty ()) pis.emplace_back (ps.static_);
+ if (!ps.doc.empty () && need_doc) pis.emplace_back (ps.doc);
+
+ if (!ps.debuginfo.empty () && need_debuginfo)
+ pis.emplace_back (ps.debuginfo);
+
+ if (!ps.debugsource.empty () && need_debugsource)
+ pis.emplace_back (ps.debugsource);
+
+ if (!ps.common.empty () && false) pis.emplace_back (ps.common);
+ ps.package_infos_main = pis.size ();
+ for (const string& n: ps.extras) pis.emplace_back (n);
+
+ dnf_list (pis);
+
+ // Handle the fallback package name, if specified.
+ //
+ // Specifically, if the main/devel package is known to the system
+ // package manager we use that. Otherwise, if the fallback package is
+ // known we use that. And if neither is known, then we skip this
+ // candidate (ps).
+ //
+ if (!ps.fallback.empty ())
+ {
+ assert (pis.size () > 1); // devel+fallback or main+fallback
+
+ package_info& mp (pis[0]); // Main/devel package info.
+ package_info& fp (pis[1]); // Fallback package info.
+
+ // Note that at this stage we can only use the installed main/devel
+ // and fallback packages (since the candidate versions may change
+ // after fetch).
+ //
+ // Also note that this logic prefers installed fallback package to
+ // potentially available non-fallback package.
+ //
+ if (mp.installed_version.empty ())
+ {
+ if (!fp.installed_version.empty ())
+ {
+ // Use the fallback.
+ //
+ (ps.main.empty () ? ps.devel : ps.main) = move (ps.fallback);
+ mp = move (fp);
+ }
+ else
+ continue; // Skip the candidate at this stage.
+ }
+
+ // Whether it was used or not, cleanup the fallback information.
+ //
+ ps.fallback.clear ();
+ pis.erase (pis.begin () + 1);
+ --ps.package_infos_main;
+ }
+
+ // Handle the unknown main package.
+ //
+ if (ps.main.empty ())
+ {
+ const package_info& devel (pis.front ());
+
+ // Note that at this stage we can only use the installed -devel
+ // package (since the candidate version may change after fetch).
+ //
+ if (devel.installed_version.empty ())
+ continue;
+
+ guess_main (ps,
+ devel.installed_version,
+ devel.installed_arch,
+ true /* installed */);
+
+ if (!ps.main.empty ()) // Not a binless library?
+ {
+ pis.emplace (pis.begin (), ps.main);
+ ps.package_infos_main++;
+ dnf_list (pis, 1);
+ }
+ }
+
+ optional<status_type> s (status (pis, ps.package_infos_main));
+
+ if (!s || *s != package_status::installed)
+ continue;
+
+ const package_info& main (pis.front ()); // Main/devel.
+
+ ps.status = *s;
+ ps.system_name = main.name;
+ ps.system_version = main.installed_version;
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple installed " << os_release.name_id
+ << " packages for " << pn <<
+ info << "candidate: " << r->system_name << ' ' << r->system_version;
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version;
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider specifying the desired version manually";
+ }
+
+ // Next look for available versions if we are allowed to install. Indicate
+ // the non-installable candidates by setting both their main and -devel
+ // package names to empty strings.
+ //
+ if (!r && install_)
+ {
+ // If we weren't instructed to fetch or we already fetched, then we
+ // don't need to re-run dnf_list().
+ //
+ bool requery;
+ if ((requery = fetch_ && !fetched_))
+ {
+ dnf_makecache ();
+ fetched_ = true;
+ }
+
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ vector<package_info>& pis (ps.package_infos);
+
+ if (requery)
+ dnf_list (pis);
+
+ // Handle the fallback package name, if specified.
+ //
+ if (!ps.fallback.empty ())
+ {
+ assert (pis.size () > 1); // devel+fallback or main+fallback
+
+ package_info& mp (pis[0]); // Main/devel package info.
+ package_info& fp (pis[1]); // Fallback package info.
+
+ // Note that this time we use the candidate versions.
+ //
+ if (mp.candidate_version.empty ())
+ {
+ string& main (!ps.main.empty () ? ps.main : ps.devel);
+
+ if (!fp.candidate_version.empty ())
+ {
+ // Use the fallback.
+ //
+ main = move (ps.fallback);
+ mp = move (fp);
+ }
+ else
+ {
+ // Otherwise, we would have resolved the name on the previous
+ // stage.
+ //
+ assert (mp.installed_version.empty () &&
+ fp.installed_version.empty ());
+
+ // Main/devel package is not installable.
+ //
+ main.clear ();
+ continue;
+ }
+ }
+
+ // Whether it was used or not, cleanup the fallback information.
+ //
+ ps.fallback.clear ();
+ pis.erase (pis.begin () + 1);
+ --ps.package_infos_main;
+ }
+
+ // Handle the unknown main package.
+ //
+ if (ps.main.empty ())
+ {
+ const package_info& devel (pis.front ());
+
+ // Note that this time we use the candidate version.
+ //
+ if (devel.candidate_version.empty ())
+ {
+ // Not installable.
+ //
+ ps.devel.clear ();
+ continue;
+ }
+
+ guess_main (ps,
+ devel.candidate_version,
+ devel.candidate_arch,
+ devel.candidate_version == devel.installed_version);
+
+ if (!ps.main.empty ()) // Not a binless library?
+ {
+ pis.emplace (pis.begin (), ps.main);
+ ps.package_infos_main++;
+ dnf_list (pis, 1);
+ }
+ }
+
+ optional<status_type> s (status (pis, ps.package_infos_main));
+
+ if (!s)
+ {
+ // Not installable.
+ //
+ ps.main.clear ();
+ ps.devel.clear ();
+ continue;
+ }
+
+ assert (*s != package_status::installed); // Sanity check.
+
+ const package_info& main (pis.front ()); // Main/devel.
+
+ // Note that if we are installing something for this main package,
+ // then we always go for the candidate version even though it may
+ // have an installed version that may be good enough (especially if
+ // what we are installing are extras). The reason is that it may as
+ // well not be good enough (especially if we are installing the
+ // -devel package) and there is no straightforward way to change our
+ // mind.
+ //
+ ps.status = *s;
+ ps.system_name = main.name;
+ ps.system_version = main.candidate_version;
+
+ // Prefer partially installed to not installed. This makes detecting
+ // ambiguity a bit trickier so we handle partially installed here
+ // and not installed in a separate loop below.
+ //
+ if (ps.status != package_status::partially_installed)
+ continue;
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ auto print_missing = [&dr] (const package_status& s)
+ {
+ for (const package_info& pi: s.package_infos)
+ if (pi.installed_version.empty ())
+ dr << ' ' << pi.name;
+ };
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple partially installed "
+ << os_release.name_id << " packages for " << pn;
+
+ dr << info << "candidate: " << r->system_name << ' '
+ << r->system_version << ", missing components:";
+ print_missing (*r);
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version << ", missing components:";
+ print_missing (ps);
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider fully installing the desired package "
+ << "manually and retrying the bpkg command";
+ }
+
+ if (!r)
+ {
+ diag_record dr; // Ambiguity diagnostics.
+
+ for (package_status& ps: candidates)
+ {
+ if (ps.main.empty () && ps.devel.empty ()) // Not installable?
+ continue;
+
+ assert (ps.status == package_status::not_installed); // Sanity check.
+
+ if (!r)
+ {
+ r = move (ps);
+ continue;
+ }
+
+ if (dr.empty ())
+ {
+ dr << fail << "multiple available " << os_release.name_id
+ << " packages for " << pn <<
+ info << "candidate: " << r->system_name << ' '
+ << r->system_version;
+ }
+
+ dr << info << "candidate: " << ps.system_name << ' '
+ << ps.system_version;
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider installing the desired package manually and "
+ << "retrying the bpkg command";
+ }
+ }
+
+ if (r)
+ {
+ // Map the Fedora version to the bpkg version. But first strip the
+ // release from Fedora version ([<epoch>:]<version>-<release>).
+ //
+ string sv (r->system_version, 0, r->system_version.rfind ('-'));
+
+ optional<version> v;
+ if (!aps.empty ())
+ v = downstream_package_version (sv,
+ aps,
+ os_release.name_id,
+ os_release.version_id,
+ os_release.like_ids);
+
+ if (!v)
+ {
+ // Fallback to using system version as downstream version. But first
+ // strip the epoch, if any. Also convert the potential pre-release
+ // separator to the bpkg version pre-release separator.
+ //
+ size_t p (sv.find (':'));
+ if (p != string::npos)
+ sv.erase (0, p + 1);
+
+ // Consider the first '~' character as a pre-release separator. Note
+ // that if there are more of them, then we will fail since '~' is an
+ // invalid character for bpkg version.
+ //
+ p = sv.find ('~');
+ if (p != string::npos)
+ sv[p] = '-';
+
+ try
+ {
+ v = version (sv);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "unable to map " << os_release.name_id << " package "
+ << r->system_name << " version " << sv << " to bpkg package "
+ << pn << " version" <<
+ info << os_release.name_id << " version is not a valid bpkg "
+ << "version: " << e.what () <<
+ info << "consider specifying explicit mapping in " << pn
+ << " package manifest";
+ }
+ }
+
+ r->version = move (*v);
+ }
+
+ return r;
+ }
+
+ void system_package_manager_fedora::
+ install (const vector<package_name>& pns)
+ {
+   assert (!pns.empty ());
+
+   assert (install_ && !installed_);
+   installed_ = true;
+
+   // The merged set of Fedora packages/versions for all the specified bpkg
+   // packages.
+   //
+   struct package
+   {
+     string name;
+     string version; // Empty if unspecified.
+     string arch;    // Always specified.
+   };
+   vector<package> pkgs;
+
+   // While it may seem that nothing needs to be done for packages that are
+   // already fully installed, some of them may have been installed
+   // automatically, which means they can also be automatically removed once
+   // no dependents remain (see dnf(8) for details). To get consistent
+   // behavior regardless of who installed a package, we run `dnf install`
+   // only if at least one package is not fully installed. In that case we
+   // pass all the packages, including the fully installed ones, but pin the
+   // latter to their installed versions so as not to force an upgrade.
+   // Whether `dnf install` is run or not, we always run `dnf mark install`
+   // afterwards for all the packages to mark them as installed by the user.
+   //
+   // For partially/not installed packages we don't specify the version,
+   // expecting the candidate version to be installed. We do, however,
+   // specify the candidate architecture, since for reasons unknown dnf may
+   // otherwise install a package of a different architecture.
+   //
+   bool run_install (false);
+
+   for (const package_name& pn: pns)
+   {
+     auto it (status_cache_.find (pn));
+     assert (it != status_cache_.end () && it->second);
+
+     const package_status& ps (*it->second);
+     bool fully (ps.status == package_status::installed);
+
+     if (!fully)
+       run_install = true;
+
+     for (const package_info& pi: ps.package_infos)
+     {
+       string n (pi.name);
+       string v (fully ? pi.installed_version : string ());
+       string a (fully ? pi.installed_arch : pi.candidate_arch);
+
+       auto i (find_if (pkgs.begin (), pkgs.end (),
+                        [&n] (const package& p) {return p.name == n;}));
+
+       if (i == pkgs.end ())
+       {
+         pkgs.push_back (package {move (n), move (v), move (a)});
+       }
+       else if (i->version.empty ())
+       {
+         i->version = move (v);
+         i->arch = move (a);
+       }
+       else
+         // Feels like this cannot happen since we always use the installed
+         // version of the package.
+         //
+         assert (i->version == v && i->arch == a);
+     }
+   }
+
+   // Produce the <name>-[<epoch>:]<version>-<release>.<arch> spec for the
+   // fully installed packages and the <name>.<arch> spec for the
+   // partially/not installed ones (see dnf_install() for details on the
+   // package specs).
+   //
+   strings specs;
+   specs.reserve (pkgs.size ());
+   for (const package& p: pkgs)
+   {
+     string s (p.version.empty ()
+               ? p.name
+               : p.name + '-' + p.version);
+     s += '.';
+     s += p.arch;
+
+     specs.push_back (move (s));
+   }
+
+   // Install, if necessary.
+   //
+   if (run_install)
+     dnf_install (specs);
+
+   // Mark as installed by the user.
+   //
+   dnf_mark_install (specs);
+
+   // Verify that the versions we have promised in status() match what
+   // actually got installed. Here we only check the main package component
+   // of each package.
+   //
+   if (run_install)
+   {
+     vector<package_info> pis;
+
+     // Find the entry in pis corresponding to the main package of ps.
+     //
+     auto find_main = [&pis] (const package_status& ps)
+     {
+       return find_if (pis.begin (), pis.end (),
+                       [&ps] (const package_info& pi)
+                       {
+                         return pi.name == ps.system_name;
+                       });
+     };
+
+     for (const package_name& pn: pns)
+     {
+       const package_status& ps (*status_cache_.find (pn)->second);
+
+       if (find_main (ps) == pis.end ())
+         pis.push_back (package_info (ps.system_name));
+     }
+
+     dnf_list (pis);
+
+     for (const package_name& pn: pns)
+     {
+       const package_status& ps (*status_cache_.find (pn)->second);
+
+       auto i (find_main (ps));
+       assert (i != pis.end ());
+
+       if (i->installed_version != ps.system_version)
+       {
+         fail << "unexpected " << os_release.name_id << " package version "
+              << "for " << ps.system_name <<
+           info << "expected: " << ps.system_version <<
+           info << "installed: " << i->installed_version <<
+           info << "consider retrying the bpkg command";
+       }
+     }
+   }
+ }
+
+ // Map non-system bpkg package to system package name(s) and version.
+ //
+ // This is used both to map the package being generated and its
+ // dependencies. What should we do with extras returned in package_status?
+ // We can't really generate any of them (which files would we place in
+ // them?) nor can we list them as dependencies (we don't know their system
+ // versions). So it feels like the only sensible choice is to ignore extras.
+ //
+ // In a sense, we have a parallel arrangement going on here: binary packages
+ // that we generate don't have extras (i.e., they include everything
+ // necessary in the "standard" packages from the main group) and when we
+ // punch a system dependency based on a non-system bpkg package, we assume
+ // it was generated by us and thus doesn't have any extras. Or, to put it
+ // another way, if you want the system dependency to refer to a "native"
+ // system package with extras you need to configure it as a system bpkg
+ // package.
+ //
+ // In fact, this extends to package names. For example, unless custom
+ // mapping is specified, we will generate libsqlite3 and libsqlite3-devel
+ // while native names are sqlite-libs and sqlite-devel. While this duality
+ // is not ideal, presumably we will normally only be producing our binary
+ // packages if there are no suitable native packages. And for a few
+ // exceptions (e.g., our package is "better" in some way, such as configured
+ // differently or fixes a critical bug), we will just have to provide
+ // appropriate manual mapping that makes sure the names match (the extras
+ // are still a potential problem though -- we will only have them as
+ // dependencies if we build against a native system package; maybe we can
+ // add them manually with an option).
+ //
+ package_status system_package_manager_fedora::
+ map_package (const package_name& pn,
+              const version& pv,
+              const available_packages& aps) const
+ {
+   // We should only have one available package corresponding to this package
+   // name/version.
+   //
+   assert (aps.size () == 1);
+
+   const shared_ptr<available_package>& ap (aps.front ().first);
+   const lazy_shared_ptr<repository_fragment>& rf (aps.front ().second);
+
+   // Without explicit type, the best we can do in trying to detect whether
+   // this is a library is to check for the lib prefix. Libraries without the
+   // lib prefix and non-libraries with the lib prefix (both of which we do
+   // not recommend) will have to provide a manual mapping (or explicit
+   // type).
+   //
+   const string& pt (ap->effective_type ());
+
+   strings ns (system_package_names (aps,
+                                     os_release.name_id,
+                                     os_release.version_id,
+                                     os_release.like_ids,
+                                     false /* native */));
+   package_status r;
+   if (ns.empty ())
+   {
+     // Automatically translate our package name similar to the consumption
+     // case above. Except here we don't attempt to deduce main from -devel
+     // or fallback to the project name, naturally.
+     //
+     const string& n (pn.string ());
+
+     if (pt == "lib")
+       r = package_status (n, n + "-devel");
+     else
+       r = package_status (n);
+   }
+   else
+   {
+     // Even though we only pass one available package, we may still end up
+     // with multiple mappings. In this case we take the first, per the
+     // documentation.
+     //
+     r = parse_name_value (pt,
+                           ns.front (),
+                           false /* need_doc */,
+                           false /* need_debuginfo */,
+                           false /* need_debugsource */);
+
+     // If this is -devel without main, then derive main by stripping the
+     // -devel suffix. This feels tighter than just using the bpkg package
+     // name.
+     //
+     if (r.main.empty ())
+     {
+       assert (!r.devel.empty ());
+       r.main.assign (r.devel, 0, r.devel.size () - 6);
+     }
+   }
+
+   // Map the version.
+   //
+   // To recap, a Fedora package version has the following form:
+   //
+   // [<epoch>:]<version>-<release>
+   //
+   // Where <release> has the following form:
+   //
+   // <release-number>[.<distribution-tag>]
+   //
+   // For details on the ordering semantics, see the Fedora Versioning
+   // Guidelines. While overall unsurprising, the only notable exceptions are
+   // `~`, which sorts before anything else and is commonly used for upstream
+   // pre-releases, and '^', which sorts after anything else and is
+   // supposedly used for upstream post-release snapshots. For example,
+   // 0.1.0~alpha.1-1.fc35 sorts earlier than 0.1.0-1.fc35.
+   //
+   // Ok, so how do we map our version to that? To recap, the bpkg version
+   // has the following form:
+   //
+   // [+<epoch>-]<upstream>[-<prerel>][+<revision>]
+   //
+   // Let's start with the case where neither distribution nor upstream
+   // version is specified and we need to derive everything from the bpkg
+   // version.
+   //
+   // <epoch>
+   //
+   //   On one hand, if we keep the epoch, it won't necessarily match
+   //   Fedora's native package epoch. But on the other it will allow our
+   //   binary packages from different epochs to co-exist. Seeing that this
+   //   can be easily overridden with a custom distribution version, let's
+   //   keep it.
+   //
+   //   Note that while the Fedora start/default epoch is 0, ours is 1 (we
+   //   use the 0 epoch for stub packages). So we will need to shift this
+   //   value range.
+   //
+   //
+   // <upstream>[-<prerel>]
+   //
+   //   Our upstream version maps naturally to Fedora's <version>. That is,
+   //   our upstream version format/semantics is a subset of Fedora's
+   //   <version>.
+   //
+   //   If this is a pre-release, then we could fail (that is, don't allow
+   //   pre-releases) but then we won't be able to test on pre-release
+   //   packages, for example, to make sure the name mapping is correct.
+   //   Plus sometimes it's useful to publish pre-releases. We could ignore
+   //   it, but then such packages will be indistinguishable from each other
+   //   and the final release, which is not ideal. On the other hand, Fedora
+   //   has the mechanism (`~`) which is essentially meant for this, so let's
+   //   use it. We will use <prerel> as is since its format is the same as
+   //   <upstream> and thus should map naturally.
+   //
+   //
+   // <revision>
+   //
+   //   Similar to epoch, our revision won't necessarily match Fedora's
+   //   native package release number. But on the other hand it will allow us
+   //   to establish a correspondence between source and binary packages.
+   //   Plus, upgrades between binary package releases will be handled
+   //   naturally. Also note that the revision is mandatory in Fedora.
+   //   Seeing that we allow overriding the releases with a custom
+   //   distribution version (see below), let's use it.
+   //
+   //   Note that the Fedora start release number is 1 and our revision is
+   //   0. So we will need to shift this value range.
+   //
+   // Another related question is whether we should do anything about the
+   // distribution tag (.fc35, .el8, etc). Given that the use of hardcoded
+   // distribution tags in RPM spec files is strongly discouraged we will
+   // just rely on the standard approach to include the appropriate tag
+   // (while allowing the user to redefine it with an option). Note that
+   // the distribution tag is normally specified for the Release and
+   // Requires directives using the %{?dist} macro expansion and can be
+   // left unspecified for the Requires directive. For example:
+   //
+   // Name: curl
+   // Version: 7.87.0
+   // Release: 1%{?dist}
+   // Requires: libcurl%{?_isa} >= %{version}-%{release}
+   // %global libpsl_version 1.2.3
+   // Requires: libpsl%{?_isa} >= %{libpsl_version}
+   //
+   // The next case to consider is when we have the upstream version
+   // (upstream-version manifest value). After some rumination it feels
+   // correct to use it in place of the <epoch>-<upstream> components in the
+   // above mapping (upstream version itself cannot have epoch). In other
+   // words, we will add the pre-release and revision components from the
+   // bpkg version. If this is not the desired semantics, then it can always
+   // be overridden with the distribution version.
+   //
+   // Finally, we have the distribution version. The <epoch> and <version>
+   // components are straightforward: they should be specified by the
+   // distribution version as required. This leaves pre-release and
+   // release. It feels like in most cases we would want these copied over
+   // from the bpkg version automatically -- it's too tedious and error-
+   // prone to maintain them manually. However, we want the user to have the
+   // full override ability. So instead, if empty release is specified, as in
+   // 1.2.3-, then we automatically add bpkg revision. Similarly, if empty
+   // pre-release is specified, as in 1.2.3~, then we add bpkg pre-release.
+   // To add both automatically, we would specify 1.2.3~- (other combinations
+   // are 1.2.3~b.1- and 1.2.3~-1). If specified, the release must not
+   // contain the distribution tag, since it is deduced automatically using
+   // the %{?dist} macro expansion if required. Also, since the release
+   // component is mandatory in Fedora, if it is omitted together with the
+   // separating dash we will add the release 1 automatically.
+   //
+   // Note also that per the RPM spec file format documentation neither
+   // version nor release components may contain `:` or `-`. Note that the
+   // bpkg upstream version may not contain either.
+   //
+   string& sv (r.system_version);
+
+   if (optional<string> ov = system_package_version (ap,
+                                                     rf,
+                                                     os_release.name_id,
+                                                     os_release.version_id,
+                                                     os_release.like_ids))
+   {
+     string& dv (*ov);
+     size_t n (dv.size ());
+
+     // Find the package release and upstream pre-release positions, if any.
+     //
+     size_t rp (dv.rfind ('-'));
+     size_t pp (dv.rfind ('~', rp));
+
+     // Copy over the [<epoch>:]<version> part.
+     //
+     sv.assign (dv, 0, pp < rp ? pp : rp);
+
+     // Add pre-release copying over the bpkg version value if empty.
+     //
+     if (pp != string::npos)
+     {
+       if (size_t pn = (rp != string::npos ? rp : n) - (pp + 1))
+       {
+         sv.append (dv, pp, pn + 1);
+       }
+       else
+       {
+         if (pv.release)
+         {
+           assert (!pv.release->empty ()); // Cannot be earliest special.
+           sv += '~';
+           sv += *pv.release;
+         }
+       }
+     }
+
+     // Add release copying over the bpkg version revision if empty.
+     //
+     if (rp != string::npos)
+     {
+       if (size_t rn = n - (rp + 1))
+       {
+         sv.append (dv, rp, rn + 1);
+       }
+       else
+       {
+         sv += '-';
+         sv += to_string (pv.revision ? *pv.revision + 1 : 1);
+       }
+     }
+     else
+       sv += "-1"; // Default to 1 since the release is mandatory.
+   }
+   else
+   {
+     if (ap->upstream_version)
+     {
+       const string& uv (*ap->upstream_version);
+
+       // Make sure the upstream version doesn't contain ':' and '-'
+       // characters since they are not allowed in the <version> component
+       // (see the RPM spec file format documentation for details).
+       //
+       // Note that we search for either character (find_first_of() rather
+       // than find(), which would look for the two-character substring) and
+       // that this check is not an exhaustive validation of the version
+       // format.
+       //
+       size_t p (uv.find_first_of (":-"));
+       if (p != string::npos)
+         fail << "'" << uv[p] << "' character in upstream-version manifest "
+              << "value " << uv << " of package " << pn << ' '
+              << ap->version <<
+           info << "consider specifying explicit " << os_release.name_id
+                << " version mapping in " << pn << " package manifest";
+
+       sv += uv;
+     }
+     else
+     {
+       // Add epoch unless maps to 0.
+       //
+       assert (pv.epoch != 0); // Cannot be a stub.
+       if (pv.epoch != 1)
+       {
+         sv = to_string (pv.epoch - 1);
+         sv += ':';
+       }
+
+       sv += pv.upstream;
+     }
+
+     // Add pre-release.
+     //
+     if (pv.release)
+     {
+       assert (!pv.release->empty ()); // Cannot be earliest special.
+       sv += '~';
+       sv += *pv.release;
+     }
+
+     // Add revision.
+     //
+     sv += '-';
+     sv += to_string (pv.revision ? *pv.revision + 1 : 1);
+   }
+
+   return r;
+ }
+
+ // Evaluate the specified expressions expanding the contained macros by
+ // executing `rpm --eval <expr1> --eval <expr2>...` and return the list of
+ // the resulting lines read from the process stdout. Note that an expression
+ // may potentially end up with multiple lines which the caller is expected
+ // to deal with (ensure fixed number of lines, eval only one expression,
+ // etc).
+ //
+ // Fail (via the diagnostics facility) if rpm cannot be executed, exits
+ // with a non-zero code, or its output cannot be read.
+ //
+ strings system_package_manager_fedora::
+ rpm_eval (const cstrings& opts, const cstrings& expressions)
+ {
+   strings r;
+
+   // Nothing to evaluate; don't even run rpm.
+   //
+   if (expressions.empty ())
+     return r;
+
+   // Assemble the command line: rpm <opts> --eval <expr1> --eval <expr2>...
+   //
+   cstrings args;
+   args.reserve (2 + opts.size () + expressions.size () * 2); // +2: program name and trailing NULL.
+
+   args.push_back ("rpm");
+
+   for (const char* o: opts)
+     args.push_back (o);
+
+   for (const char* e: expressions)
+   {
+     args.push_back ("--eval");
+     args.push_back (e);
+   }
+
+   args.push_back (nullptr);
+
+   try
+   {
+     process_path pp (process::path_search (args[0]));
+     process_env pe (pp);
+
+     if (verb >= 3)
+       print_process (pe, args);
+
+     // Redirect stdout to a pipe; stdin is closed and stderr is inherited.
+     //
+     process pr (pp, args, -2 /* stdin */, -1 /* stdout */, 2 /* stderr */);
+
+     try
+     {
+       ifdstream is (move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+
+       // The number of lines is normally equal to or greater than the number
+       // of expressions.
+       //
+       r.reserve (expressions.size ());
+
+       for (string l; !eof (getline (is, l)); )
+         r.push_back (move (l));
+
+       is.close ();
+     }
+     catch (const io_error& e)
+     {
+       // Only report the read error if the process terminated normally;
+       // otherwise fall through and let the exit code diagnostics below
+       // describe the failure.
+       //
+       if (pr.wait ())
+         fail << "unable to read " << args[0] << " --eval output: " << e;
+
+       // Fall through.
+     }
+
+     if (!pr.wait ())
+     {
+       diag_record dr (fail);
+       dr << args[0] << " exited with non-zero code";
+
+       // Unless the command line was already printed above (verb >= 3),
+       // include it in the diagnostics.
+       //
+       if (verb < 3)
+       {
+         dr << info << "command line: ";
+         print_process (dr, pe, args);
+       }
+     }
+   }
+   catch (const process_error& e)
+   {
+     error << "unable to execute " << args[0] << ": " << e;
+
+     // If the error happened in the child process (between fork and exec),
+     // exit immediately rather than throwing across the fork boundary.
+     //
+     if (e.child)
+       exit (1);
+
+     throw failed ();
+   }
+
+   return r;
+ }
+
+ // Some background on creating Fedora packages (for a bit more detailed
+ // overview see the RPM Packaging Guide).
+ //
+ // An RPM package consists of the cpio archive, which contains the package
+ // files plus the RPM header file with metadata about the package. The RPM
+ // package manager uses this metadata to determine dependencies, where to
+ // install files, and other information. There are two types of RPM
+ // packages: source RPM and binary RPM. A source RPM contains source code,
+ // optionally patches to apply, and the spec file, which describes how to
+ // build the source code into a binary RPM. A binary RPM contains the
+ // binaries built from the source package. While it's possible to create
+ // the package completely manually without using any of the Fedora tools, we
+ // are not going to go this route (see reasons mentioned in the Debian
+ // implementation for the list of issues with this approach).
+ //
+ // Based on this our plan is to produce an RPM spec file and then invoke
+ // rpmbuild to produce the binary package from that. While this approach is
+ // normally used to build things from source, it feels like we should be
+ // able to pretend that we are. Specifically, we can implement the %install
+ // section of the spec file to invoke the build system and install all the
+ // packages directly from their bpkg locations.
+ //
+ // Note that the -debuginfo sub-packages are generated by default and all we
+ // need to do from our side is to compile with debug information (-g),
+ // failing which we get a warning from rpmbuild. We will also disable
+ // generating the -debugsource sub-packages since that would require to set
+ // up the source files infrastructure in the ~/rpmbuild/BUILD/ directory,
+ // which feels too hairy for now.
+ //
+ // Note: this setup requires rpmdevtools (rpmdev-setuptree) and its
+ // dependency rpm-build and rpm packages.
+ //
+ auto system_package_manager_fedora::
+ generate (const packages& pkgs,
+ const packages& deps,
+ const strings& vars,
+ const dir_path& cfg_dir,
+ const package_manifest& pm,
+ const string& pt,
+ const small_vector<language, 1>& langs,
+ optional<bool> recursive_full,
+ bool /* first */) -> binary_files
+ {
+ tracer trace ("system_package_manager_fedora::generate");
+
+ assert (!langs.empty ()); // Should be effective.
+
+ const shared_ptr<selected_package>& sp (pkgs.front ().selected);
+ const package_name& pn (sp->name);
+ const version& pv (sp->version);
+
+ // Use version without iteration in paths, etc.
+ //
+ string pvs (pv.string (false /* ignore_revision */,
+ true /* ignore_iteration */));
+
+ const available_packages& aps (pkgs.front ().available);
+
+ bool lib (pt == "lib");
+ bool priv (ops_->private_ ()); // Private installation.
+
+ // For now we only know how to handle libraries with C-common interface
+ // languages. But we allow other implementation languages.
+ //
+ if (lib)
+ {
+ for (const language& l: langs)
+ if (!l.impl && l.name != "c" && l.name != "c++" && l.name != "cc")
+ fail << l.name << " libraries are not yet supported";
+ }
+
+ // Return true if this package uses the specified language, only as
+ // interface language if intf_only is true.
+ //
+ auto lang = [&langs] (const char* n, bool intf_only = false) -> bool
+ {
+ return find_if (langs.begin (), langs.end (),
+ [n, intf_only] (const language& l)
+ {
+ return (!intf_only || !l.impl) && l.name == n;
+ }) != langs.end ();
+ };
+
+ // As a first step, figure out the system names and version of the package
+ // we are generating and all the dependencies, diagnosing anything fishy.
+ // If the main package is not present for a dependency, then set the main
+ // package name to an empty string.
+ //
+ // Note that there should be no duplicate dependencies and we can sidestep
+ // the status cache.
+ //
+ package_status st (map_package (pn, pv, aps));
+
+ vector<package_status> sdeps;
+ sdeps.reserve (deps.size ());
+ for (const package& p: deps)
+ {
+ const shared_ptr<selected_package>& sp (p.selected);
+ const available_packages& aps (p.available);
+
+ package_status s;
+ if (sp->substate == package_substate::system)
+ {
+ // Note that for a system dependency the main package name is already
+ // empty if it is not present in the distribution.
+ //
+ optional<package_status> os (status (sp->name, aps));
+
+ if (!os || os->status != package_status::installed)
+ fail << os_release.name_id << " package for " << sp->name
+ << " system package is no longer installed";
+
+ // For good measure verify the mapped back version still matches
+ // configured. Note that besides the normal case (queried by the
+ // system package manager), it could have also been specified by the
+ // user as an actual version or a wildcard. Ignoring this check for a
+ // wildcard feels consistent with the overall semantics.
+ //
+ if (sp->version != wildcard_version && sp->version != os->version)
+ {
+ fail << "current " << os_release.name_id << " package version for "
+ << sp->name << " system package does not match configured" <<
+ info << "configured version: " << sp->version <<
+ info << "current version: " << os->version << " ("
+ << os->system_version << ')';
+ }
+
+ s = move (*os);
+
+ // Note that the system version retrieved with status() likely
+ // contains the distribution tag in its release component. We,
+ // however, don't want it to ever be mentioned in the spec file and so
+ // just strip it right away. This will also make it consistent with
+ // the non-system dependencies.
+ //
+ string& v (s.system_version);
+ size_t p (v.find_last_of ("-."));
+ assert (p != string::npos); // The release is mandatory.
+
+ if (v[p] == '.')
+ v.resize (p);
+ }
+ else
+ {
+ s = map_package (sp->name, sp->version, aps);
+
+ // Set the main package name to an empty string if we wouldn't be
+ // generating the main package for this dependency (binless library
+ // without the -common sub-package).
+ //
+ assert (aps.size () == 1);
+
+ const optional<string>& t (aps.front ().first->type);
+
+ if (s.common.empty () &&
+ package_manifest::effective_type (t, sp->name) == "lib")
+ {
+ strings sos (package_manifest::effective_type_sub_options (t));
+
+ if (find (sos.begin (), sos.end (), "binless") != sos.end ())
+ s.main.clear ();
+ }
+ }
+
+ sdeps.push_back (move (s));
+ }
+
+ // We only allow the standard -debug* sub-package names.
+ //
+ if (!st.debuginfo.empty () && st.debuginfo != st.main + "-debuginfo")
+ fail << "generation of -debuginfo packages with custom names not "
+ << "supported" <<
+ info << "use " << st.main << "-debuginfo name instead";
+
+ if (!st.debugsource.empty () && st.debuginfo != st.main + "-debugsource")
+ fail << "generation of -debugsource packages with custom names not "
+ << "supported" <<
+ info << "use " << st.main << "-debugsource name instead";
+
+ // Prepare the common extra options that need to be passed to both
+ // rpmbuild and rpm.
+ //
+ strings common_opts {"--target", arch};
+
+ // Add the dist macro (un)definition if --fedora-dist-tag is specified.
+ //
+ if (ops_->fedora_dist_tag_specified ())
+ {
+ string dist (ops_->fedora_dist_tag ());
+
+ if (!dist.empty ())
+ {
+ bool f (dist.front () == '+');
+ bool b (dist.back () == '+');
+
+ if (f && b) // Note: covers just `+`.
+ fail << "invalid distribution tag '" << dist << "'";
+
+ // If the distribution tag is specified with a leading/trailing '+',
+ // then we query the default tag value and modify it using the
+ // specified suffix/prefix.
+ //
+ // Note that we rely on the fact that the dist tag doesn't depend on
+ // the --target option which we also pass to rpmbuild.
+ //
+ if (f || b)
+ {
+ string affix (move (dist));
+ strings expansions (rpm_eval (cstrings (), cstrings {"%{?dist}"}));
+
+ if (expansions.size () != 1)
+ fail << "one line expected as an expansion of macro %{?dist}";
+
+ dist = move (expansions[0]);
+
+ // Normally, the default distribution tag starts with the dot, in
+ // which case we insert the prefix after it. Note, however, that the
+ // tag can potentially be re/un-defined (for example in
+ // ~/.rpmmacros), so we need to also handle the potential absence of
+ // the leading dot inserting the prefix right at the beginning in
+ // this case.
+ //
+ if (f)
+ dist.append (affix, 1, affix.size () - 1);
+ else
+ dist.insert (dist[0] == '.' ? 1 : 0, affix, 0, affix.size () - 1);
+ }
+ else
+ {
+ // Insert the leading dot into the distribution tag if missing.
+ //
+ if (dist.front () != '.')
+ dist.insert (dist.begin (), '.');
+ }
+
+ common_opts.push_back ("--define=dist " + dist);
+ }
+ else
+ common_opts.push_back ("--define=dist %{nil}");
+ }
+
+ // Evaluate the specified expressions expanding the contained macros. Make
+ // sure these macros are expanded to the same values as if used in the
+ // being generated spec file.
+ //
+ // Note that %{_docdir} and %{_licensedir} macros are set internally by
+ // rpmbuild (may depend on DocDir spec file directive, etc which we will
+ // not use) and thus cannot be queried with `rpm --eval` out of the
+ // box. To allow using these macros in the expressions, we provide their
+ // definitions to their default values on the command line.
+ //
+ auto eval = [&common_opts, this] (const cstrings& expressions)
+ {
+ cstrings opts;
+ opts.reserve (common_opts.size () +
+ 2 +
+ ops_->fedora_query_option ().size ());
+
+ // Pass the rpmbuild/rpm common options.
+ //
+ for (const string& o: common_opts)
+ opts.push_back (o.c_str ());
+
+ // Pass the %{_docdir} and %{_licensedir} macro definitions.
+ //
+ opts.push_back ("--define=_docdir %{_defaultdocdir}");
+ opts.push_back ("--define=_licensedir %{_defaultlicensedir}");
+
+ // Pass any additional options specified by the user.
+ //
+ for (const string& o: ops_->fedora_query_option ())
+ opts.push_back (o.c_str ());
+
+ return rpm_eval (opts, expressions);
+ };
+
+ // We override every config.install.* variable in order not to pick
+ // anything configured. Note that we add some more in the spec file below.
+ //
+ // We make use of the <project> substitution since in the recursive mode
+ // we may be installing multiple projects. Note that the <private>
+ // directory component is automatically removed if this functionality is
+ // not enabled. One side-effect of using <project> is that we will be
+ // using the bpkg package name instead of the Fedora package name. But
+ // perhaps that's correct: while in Fedora the source package name (which
+ // is the same as the main binary package name) does not necessarily
+ // correspond to the "logical" package name, we still want to use the
+ // logical name (consider libsqlite3 which is mapped to sqlite-libs and
+ // sqlite-devel; we don't want <project> to be sqlite-libs). To keep
+ // things consistent we use the bpkg package name for <private> as well.
+ //
+ // Let's only use those directory macros which we can query with `rpm
+ // --eval` (see eval() lambda for details). Note that this means our
+ // installed_entries paths (see below) may not correspond exactly to where
+ // things will actually be installed during rpmbuild. But that shouldn't
+ // be an issue since we make sure to never use these paths directly in the
+ // spec file (always using macros instead).
+ //
+ // NOTE: make sure to update the expressions evaluation and the %files
+ // sections below if changing anything here.
+ //
+ strings config {
+ "config.install.root=%{_prefix}/",
+ "config.install.data_root=%{_exec_prefix}/",
+ "config.install.exec_root=%{_exec_prefix}/",
+
+ "config.install.bin=%{_bindir}/",
+ "config.install.sbin=%{_sbindir}/",
+
+ // On Fedora shared libraries should be executable.
+ //
+ "config.install.lib=%{_libdir}/<private>/",
+ "config.install.lib.mode=755",
+ "config.install.libexec=%{_libexecdir}/<private>/<project>/",
+ "config.install.pkgconfig=lib/pkgconfig/",
+
+ "config.install.etc=%{_sysconfdir}/",
+ "config.install.include=%{_includedir}/<private>/",
+ "config.install.include_arch=include/",
+ "config.install.share=%{_datadir}/",
+ "config.install.data=share/<private>/<project>/",
+ "config.install.buildfile=share/build2/export/<project>/",
+
+ "config.install.doc=%{_docdir}/<private>/<project>/",
+ "config.install.legal=%{_licensedir}/<private>/<project>/",
+ "config.install.man=%{_mandir}/",
+ "config.install.man1=man/man1/",
+ "config.install.man2=man/man2/",
+ "config.install.man3=man/man3/",
+ "config.install.man4=man/man4/",
+ "config.install.man5=man/man5/",
+ "config.install.man6=man/man6/",
+ "config.install.man7=man/man7/",
+ "config.install.man8=man/man8/"};
+
+ config.push_back ("config.install.private=" +
+ (priv ? pn.string () : "[null]"));
+
+ // Add user-specified configuration variables last to allow them to
+ // override anything.
+ //
+ for (const string& v: vars)
+ config.push_back (v);
+
+ // Note that we need to expand macros in the configuration variables
+ // before passing them to the below installed_entries() call.
+ //
+ // Also note that we expand the variables passed on the command line as
+ // well. While this can be useful, it can also be surprising. However, it
+ // is always possible to escape the '%' character which introduces the
+ // macro expansion, which in most cases won't be necessary since an
+ // undefined macro expansion is preserved literally.
+ //
+ // While at it, also obtain some other information that we will need down
+ // the road.
+ //
+ strings expansions;
+
+ // Installed entry directories for sorting out the installed files into
+ // the %files sections of the sub-packages.
+ //
+ // We put exported buildfiles into the main package, which makes sense
+ // after some meditation: they normally contain rules and are bundled
+ // either with a tool (say, thrift), a module (say, libbuild2-thrift), or
+ // an add-on package (say, thrift-build2).
+ //
+ dir_path bindir;
+ dir_path sbindir;
+ dir_path libexecdir;
+ dir_path confdir;
+ dir_path incdir;
+ dir_path bfdir;
+ dir_path libdir;
+ dir_path pkgdir; // Not queried, set as libdir/pkgconfig/.
+ dir_path sharedir;
+ dir_path docdir;
+ dir_path mandir;
+ dir_path licensedir;
+ dir_path build2dir;
+
+ // Note that the ~/rpmbuild/{.,BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
+ // directory paths used by rpmbuild are actually defined as the
+ // %{_topdir}, %{_builddir}, %{_buildrootdir}, %{_rpmdir}, %{_sourcedir},
+ // %{_specdir}, and %{_srcrpmdir} RPM macros. These macros can potentially
+ // be redefined in RPM configuration files, in particular, in
+ // ~/.rpmmacros.
+ //
+ dir_path topdir; // ~/rpmbuild/
+ dir_path specdir; // ~/rpmbuild/SPECS/
+
+ // RPM file absolute path template.
+ //
+ // Note that %{_rpmfilename} normally expands as the following template:
+ //
+ // %{ARCH}/%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}.rpm
+ //
+ string rpmfile;
+ {
+ cstrings expressions;
+ expressions.reserve (config.size () + 13);
+
+ for (const string& c: config)
+ expressions.push_back (c.c_str ());
+
+ expressions.push_back ("%{?_bindir}");
+ expressions.push_back ("%{?_sbindir}");
+ expressions.push_back ("%{?_libexecdir}");
+ expressions.push_back ("%{?_sysconfdir}");
+ expressions.push_back ("%{?_includedir}");
+ expressions.push_back ("%{?_libdir}");
+ expressions.push_back ("%{?_datadir}");
+ expressions.push_back ("%{?_docdir}");
+ expressions.push_back ("%{?_mandir}");
+ expressions.push_back ("%{?_licensedir}");
+
+ expressions.push_back ("%{?_topdir}");
+ expressions.push_back ("%{?_specdir}");
+
+ expressions.push_back ("%{_rpmdir}/%{_rpmfilename}");
+
+ // Note that we rely on the fact that these macros are defined while we
+ // refer to them in the spec file, etc. Thus, let's verify that and fail
+ // early if that's not the case for whatever reason.
+ //
+ expressions.push_back ("%{?_rpmdir}");
+ expressions.push_back ("%{?_rpmfilename}");
+ expressions.push_back ("%{?_usrsrc}");
+ expressions.push_back ("%{?buildroot}");
+
+ // Note that if the architecture passed with the --target option is
+ // invalid, then rpmbuild will fail with some ugly diagnostics since
+ // %{_arch} macro stays unexpanded in some commands executed by
+ // rpmbuild. Thus, let's verify that the architecture is recognized by
+ // rpmbuild and fail early if that's not the case.
+ //
+ expressions.push_back ("%{?_arch}");
+
+ expansions = eval (expressions);
+
+ // Shouldn't happen unless some paths contain newlines, which we don't
+ // care about.
+ //
+ if (expansions.size () != expressions.size ())
+ fail << "number of RPM directory path expansions differs from number "
+ << "of path expressions";
+
+ // Pop the string/directory expansions.
+ //
+ auto pop_string = [&expansions, &expressions] ()
+ {
+ assert (!expansions.empty ());
+
+ string r (move (expansions.back ()));
+
+ if (r.empty ())
+ fail << "macro '" << expressions.back () << "' expands into empty "
+ << "string";
+
+ expansions.pop_back ();
+ expressions.pop_back ();
+ return r;
+ };
+
+ auto pop_path = [&expansions, &expressions] ()
+ {
+ assert (!expansions.empty ());
+
+ try
+ {
+ path r (move (expansions.back ()));
+
+ if (r.empty ())
+ fail << "macro '" << expressions.back () << "' expands into empty "
+ << "path";
+
+ expansions.pop_back ();
+ expressions.pop_back ();
+ return r;
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "macro '" << expressions.back () << "' expands into invalid "
+ << "path '" << e.path << "'" << endf;
+ }
+ };
+
+ auto pop_dir = [&pop_path] ()
+ {
+ return path_cast<dir_path> (pop_path ());
+ };
+
+ // The source of a potentially invalid architecture is likely to be the
+ // --architecture option specified by the user. But it can probably also
+ // be caused by some misconfiguration.
+ //
+ if (expansions.back ().empty ()) // %{?_arch}
+ fail << "unknown target architecture '" << arch << "'";
+
+ // We only need the following macro expansions for the verification.
+ //
+ pop_string (); // %{?_arch}
+ pop_dir (); // %{?buildroot}
+ pop_dir (); // %{?_usrsrc}
+ pop_string (); // %{?_rpmfilename}
+ pop_dir (); // %{?_rpmdir}
+
+ rpmfile = pop_string ();
+ specdir = pop_dir ();
+ topdir = pop_dir ();
+
+ // Let's tighten things up and only look for the installed files in
+ // <private>/ (if specified) to make sure there is nothing stray.
+ //
+ dir_path pd (priv ? pn.string () : "");
+
+ licensedir = pop_dir () / pd;
+ mandir = pop_dir ();
+ docdir = pop_dir () / pd;
+ sharedir = pop_dir ();
+ build2dir = sharedir / dir_path ("build2");
+ bfdir = build2dir / dir_path ("export");
+ sharedir /= pd;
+ libdir = pop_dir () / pd;
+ pkgdir = libdir / dir_path ("pkgconfig");
+ incdir = pop_dir () / pd;
+ confdir = pop_dir ();
+ libexecdir = pop_dir () / pd;
+ sbindir = pop_dir ();
+ bindir = pop_dir ();
+
+ // Only configuration variables expansions must remain.
+ //
+ assert (expansions.size () == config.size ());
+ }
+
+ // Note that the conventional place for all the inputs and outputs of the
+ // rpmbuild operations is the directory tree rooted at ~/rpmbuild/. We
+ // won't fight with rpmbuild and will use this tree as the user would
+ // do while creating the binary package manually.
+ //
+ // Specifically, we will create the RPM spec file in ~/rpmbuild/SPECS/,
+ // install the package(s) under the ~/rpmbuild/BUILDROOT/<package-dir>/
+ // chroot, and expect the generated RPM files under ~/rpmbuild/RPMS/.
+ //
+ // That, in particular, means that we have no use for the --output-root
+ // directory. We will also make sure that we don't overwrite an existing
+ // RPM spec file unless --wipe-output is specified.
+ //
+ if (ops_->output_root_specified () && ops_->output_root () != topdir)
+ fail << "--output-root|-o must be " << topdir << " if specified";
+
+ // Note that in Fedora the Name spec file directive names the source
+ // package as well as the main binary package and the spec file should
+ // match this name.
+ //
+ // @@ TODO (maybe/later): it's unclear whether it's possible to rename
+ // the main binary package. Maybe makes sense to investigate if/when
+ // we decide to generate source packages.
+ //
+ path spec (specdir / (st.main + ".spec"));
+
+ if (exists (spec) && !ops_->wipe_output ())
+ fail << "RPM spec file " << spec << " already exists" <<
+ info << "use --wipe-output to remove but be careful";
+
+ // Note that we can use weak install scope for the auto recursive mode
+ // since we know dependencies cannot be spread over multiple linked
+ // configurations.
+ //
+ string scope (!recursive_full || *recursive_full ? "project" : "weak");
+
+ // Get the map of files that will end up in the binary packages.
+ //
+ installed_entry_map ies (
+ installed_entries (*ops_, pkgs, expansions, scope));
+
+ if (ies.empty ())
+ fail << "specified package(s) do not install any files";
+
+ if (verb >= 4)
+ {
+ for (const auto& p: ies)
+ {
+ diag_record dr (trace);
+ dr << "installed entry: " << p.first;
+
+ if (p.second.target != nullptr)
+ dr << " -> " << p.second.target->first; // Symlink.
+ else
+ dr << ' ' << p.second.mode;
+ }
+ }
+
+ // As an optimization, don't generate the main and -debug* packages for a
+ // binless library unless it also specifies the -common sub-package.
+ //
+ // If this is a binless library, then verify that it doesn't install any
+ // executable, library, or configuration files. Also verify that it has
+ // the -devel sub-package but doesn't specify the -static sub-package.
+ //
+ bool binless (false);
+
+ if (lib)
+ {
+ assert (aps.size () == 1);
+
+ const shared_ptr<available_package>& ap (aps.front ().first);
+ strings sos (package_manifest::effective_type_sub_options (ap->type));
+
+ if (find (sos.begin (), sos.end (), "binless") != sos.end ())
+ {
+ // Verify installed files.
+ //
+ auto bad_install = [&pn, &pv] (const string& w)
+ {
+ fail << "binless library " << pn << ' ' << pv << " installs " << w;
+ };
+
+ auto verify_not_installed = [&ies, &bad_install] (const dir_path& d)
+ {
+ auto p (ies.find_sub (d));
+ if (p.first != p.second)
+ bad_install (p.first->first.string ());
+ };
+
+ verify_not_installed (bindir);
+ verify_not_installed (sbindir);
+ verify_not_installed (libexecdir);
+
+ // It would probably be better not to fail here but generate the main
+ // package instead (as we do if the -common sub-package is also being
+ // generated). Then, however, it would not be easy to detect if a
+ // dependency has the main package or not (see sdeps initialization
+ // for details).
+ //
+ verify_not_installed (confdir);
+
+ for (auto p (ies.find_sub (libdir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+
+ if (!f.sub (pkgdir))
+ bad_install (f.string ());
+ }
+
+ // Verify sub-packages.
+ //
+ if (st.devel.empty ())
+ fail << "binless library " << pn << ' ' << pv << " doesn't have "
+ << os_release.name_id << " -devel package";
+
+ if (!st.static_.empty ())
+ fail << "binless library " << pn << ' ' << pv << " has "
+ << os_release.name_id << ' ' << st.static_ << " package";
+
+ binless = true;
+ }
+ }
+
+ bool gen_main (!binless || !st.common.empty ());
+
+ // If we don't generate the main package (and thus the -common
+ // sub-package), then fail if there are any data files installed. It would
+ // probably be better not to fail but generate the main package instead in
+ // this case. Then, however, it would not be easy to detect if a
+ // dependency has the main package or not.
+ //
+ if (!gen_main)
+ {
+ for (auto p (ies.find_sub (sharedir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+
+ if (!f.sub (docdir) && !f.sub (mandir) && !f.sub (licensedir))
+ {
+ fail << "binless library " << pn << ' ' << pv << " installs " << f <<
+ info << "consider specifying -common package in explicit "
+ << os_release.name_id << " name mapping in package manifest";
+ }
+ }
+
+ for (auto p (ies.find_sub (bfdir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+
+ fail << "binless library " << pn << ' ' << pv << " installs " << f <<
+ info << "consider specifying -common package in explicit "
+ << os_release.name_id << " name mapping in package manifest";
+ }
+ }
+
+ if (verb >= 3)
+ {
+ auto print_status = [] (diag_record& dr,
+ const package_status& s,
+ const string& main)
+ {
+ dr << (main.empty () ? "" : " ") << main
+ << (s.devel.empty () ? "" : " ") << s.devel
+ << (s.static_.empty () ? "" : " ") << s.static_
+ << (s.doc.empty () ? "" : " ") << s.doc
+ << (s.debuginfo.empty () ? "" : " ") << s.debuginfo
+ << (s.debugsource.empty () ? "" : " ") << s.debugsource
+ << (s.common.empty () ? "" : " ") << s.common
+ << ' ' << s.system_version;
+ };
+
+ {
+ diag_record dr (trace);
+ dr << "package:";
+ print_status (dr, st, gen_main ? st.main : empty_string);
+ }
+
+ for (const package_status& s: sdeps)
+ {
+ diag_record dr (trace);
+ dr << "dependency:";
+ print_status (dr, s, s.main);
+ }
+ }
+
+ // Prepare the data for the RPM spec file.
+ //
+ // Url directive.
+ //
+ string url (pm.package_url ? pm.package_url->string () :
+ pm.url ? pm.url->string () :
+ string ());
+
+ // Packager directive.
+ //
+ string packager;
+ if (ops_->fedora_packager_specified ())
+ {
+ packager = ops_->fedora_packager ();
+ }
+ else
+ {
+ const email* e (pm.package_email ? &*pm.package_email :
+ pm.email ? &*pm.email :
+ nullptr);
+
+ if (e == nullptr)
+ fail << "unable to determine packager from manifest" <<
+ info << "specify explicitly with --fedora-packager";
+
+ // In certain places (e.g., %changelog), Fedora expects this to be in the
+ // `John Doe <john@example.org>` form while we often specify just the
+ // email address (e.g., to the mailing list). Try to detect such a case
+ // and complete it to the desired format.
+ //
+ if (e->find (' ') == string::npos && e->find ('@') != string::npos)
+ {
+ // Try to use comment as name, if any.
+ //
+ if (!e->comment.empty ())
+ {
+ packager = e->comment;
+
+ // Strip the potential trailing dot.
+ //
+ if (packager.back () == '.')
+ packager.pop_back ();
+ }
+ else
+ packager = pn.string () + " package maintainer";
+
+ packager += " <" + *e + '>';
+ }
+ else
+ packager = *e;
+ }
+
+ // Version, Release, and Epoch directives.
+ //
+ struct system_version
+ {
+ string epoch;
+ string version;
+ string release;
+ };
+
+ auto parse_system_version = [] (const string& v)
+ {
+ system_version r;
+
+ size_t e (v.find (':'));
+ if (e != string::npos)
+ r.epoch = string (v, 0, e);
+
+ size_t b (e != string::npos ? e + 1 : 0);
+ e = v.find ('-', b);
+ assert (e != string::npos); // Release is required.
+
+ r.version = string (v, b, e - b);
+
+ b = e + 1;
+ r.release = string (v, b);
+ return r;
+ };
+
+ system_version sys_version (parse_system_version (st.system_version));
+
+ // License directive.
+ //
+ // The directive value is a SPDX license expression. Note that the OR/AND
+ // operators must be specified in upper case and the AND operator has a
+ // higher precedence than OR.
+ //
+ string license;
+ for (const licenses& ls: pm.license_alternatives)
+ {
+ if (!license.empty ())
+ license += " OR ";
+
+ for (auto b (ls.begin ()), i (b); i != ls.end (); ++i)
+ {
+ if (i != b)
+ license += " AND ";
+
+ license += *i;
+ }
+ }
+
+ // Create the ~/rpmbuild directory tree if it doesn't exist yet.
+ //
+ if (!exists (topdir))
+ {
+ cstrings args {"rpmdev-setuptree", nullptr};
+
+ try
+ {
+ process_path pp (process::path_search (args[0]));
+ process_env pe (pp);
+
+ if (verb >= 3)
+ print_process (pe, args);
+
+ process pr (pp, args);
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << args[0] << " exited with non-zero code";
+
+ if (verb < 3)
+ {
+ dr << info << "command line: ";
+ print_process (dr, pe, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ // For good measure verify that ~/rpmbuild directory now exists.
+ //
+ if (!exists (topdir))
+ fail << "unable to create RPM build directory " << topdir;
+ }
+
+ // We cannot easily detect architecture-independent packages (think
+ // libbutl.bash) and providing an option feels like the best we can do.
+ // Note that the noarch value means architecture-independent and any other
+ // value means architecture-dependent.
+ //
+ const string& build_arch (ops_->fedora_build_arch_specified ()
+ ? ops_->fedora_build_arch ()
+ : empty_string);
+
+ // The RPM spec file.
+ //
+ // Note that we try to do a reasonably thorough job (e.g., using macros
+ // rather than hardcoding values, trying to comply with Fedora guidelines
+ // and recommendations, etc) with the view that this can be used as a
+ // starting point for manual packaging.
+ //
+ // NOTE: if changing anything here make sure that all the macros expanded
+ // in the spec file unconditionally are defined (see above how we do
+ // that for the _usrsrc macro as an example).
+ //
+ try
+ {
+ ofdstream os (spec);
+
+ // Note that Fedora Packaging Guidelines recommend to declare the
+ // package dependencies in the architecture-specific fashion using the
+ // %{?_isa} macro in the corresponding Requires directive (e.g.,
+ // `Requires: foo%{?_isa}` which would expand to something like
+ // `Requires: foo(x86-64)`). We, however, cannot easily detect if the
+ // distribution packages which correspond to the bpkg package
+ // dependencies are architecture-specific or not. Thus, we will generate
+ // the architecture-independent Requires directives for them which
+ // postpones the architecture resolution until the package installation
+ // time by dnf. We could potentially still try to guess if the
+ // dependency package is architecture-specific or not based on its
+ // languages, but let's keep it simple for now seeing that it's not a
+ // deal breaker.
+ //
+ // Also note that we will generate the architecture-specific
+ // dependencies on our own sub-packages, unless the --fedora-build-arch
+ // option has been specified, and for the C/C++ language related
+ // dependencies (glibc, etc). In other words, we will not try to craft
+ // the architecture specifier ourselves when we cannot use %{?_isa}.
+ //
+ string isa (build_arch.empty () ? "%{?_isa}" : "");
+
+ // Add the Requires directive(s), optionally separating them from the
+ // previous directives with an empty line.
+ //
+ auto add_requires = [&os] (bool& first, const string& v)
+ {
+ if (first)
+ {
+ os << '\n';
+ first = false;
+ }
+
+ os << "Requires: " << v << '\n';
+ };
+
+ auto add_requires_list = [&add_requires] (bool& first, const strings& vs)
+ {
+ for (const string& v: vs)
+ add_requires (first, v);
+ };
+
+ // Add the Requires directives for language dependencies of a
+ // sub-package. Deduce the language dependency packages (such as glibc,
+ // libstdc++, etc), unless they are specified explicitly via the
+ // --fedora-*-langreq options. If single option with an empty value is
+ // specified, then no language dependencies are added. The valid
+ // sub-package suffixes are '' (main package), '-devel', and '-static'.
+ //
+ auto add_lang_requires = [&lang, &add_requires, &add_requires_list]
+ (bool& first,
+ const string& suffix,
+ const strings& options,
+ bool intf_only = false)
+ {
+ if (!options.empty ())
+ {
+ if (options.size () != 1 || !options[0].empty ())
+ add_requires_list (first, options);
+ }
+ else
+ {
+ // Add dependency on libstdc++<suffix> and glibc<suffix> packages.
+ //
+ // It doesn't seem that the -static sub-package needs to define any
+ // default C/C++ language dependencies. That is a choice of the
+ // dependent packages which may want to link the standard libraries
+ // either statically or dynamically, so let's leave it for them to
+ // arrange.
+ //
+ if (suffix != "-static")
+ {
+ // If this is an undetermined C-common library, we assume it may
+ // be C++ (better to over- than under-specify).
+ //
+ bool cc (lang ("cc", intf_only));
+ if (cc || lang ("c++", intf_only))
+ add_requires (first, string ("libstdc++") + suffix + "%{?_isa}");
+
+ if (cc || lang ("c", intf_only))
+ add_requires (first, string ("glibc") + suffix + "%{?_isa}");
+ }
+ }
+ };
+
+ // We need to add the mandatory Summary and %description directives both
+ // for the main package and for the sub-packages. In the Summary
+ // directives we will use the `summary` package manifest value. In the
+ // %description directives we will just describe the sub-package content
+ // since using the `description` package manifest value is not going to
+ // be easy: it can be arbitrarily long and may not even be plain text
+ // (it's commonly the contents of the README.md file).
+ //
+ // We will disable automatic dependency discovery for all sub-packages
+ // using the `AutoReqProv: no` directive since we have an accurate set
+ // and some of them may not be system packages.
+ //
+
+ // The common information and the main package.
+ //
+ {
+ os << "Name: " << st.main << '\n'
+ << "Version: " << sys_version.version << '\n'
+ << "Release: " << sys_version.release << "%{?dist}" << '\n';
+
+ if (!sys_version.epoch.empty ())
+ os << "Epoch: " << sys_version.epoch << '\n';
+
+ os << "License: " << license << '\n'
+ << "Summary: " << pm.summary << '\n'
+ << "Url: " << url << '\n';
+
+ if (!packager.empty ())
+ os << "Packager: " << packager << '\n';
+
+#if 0
+ os << "#Source: https://pkg.cppget.org/1/???/"
+ << pm.effective_project () << '/' << sp->name << '-'
+ << sp->version << ".tar.gz" << '\n';
+#endif
+
+ // Idiomatic epoch-version-release value.
+ //
+ os << '\n'
+ << "%global evr %{?epoch:%{epoch}:}%{version}-%{release}" << '\n';
+
+ if (gen_main)
+ {
+ os << '\n'
+ << "# " << st.main << '\n'
+ << "#" << '\n';
+
+ if (!build_arch.empty ())
+ os << "BuildArch: " << build_arch << '\n';
+
+ os << "AutoReqProv: no" << '\n';
+
+ // Requires directives.
+ //
+ {
+ bool first (true);
+ if (!st.common.empty ())
+ add_requires (first, st.common + " = %{evr}");
+
+ for (const package_status& s: sdeps)
+ {
+ if (!s.main.empty ())
+ add_requires (first, s.main + " >= " + s.system_version);
+ }
+
+ add_lang_requires (first,
+ "" /* suffix */,
+ ops_->fedora_main_langreq ());
+
+ if (ops_->fedora_main_extrareq_specified ())
+ add_requires_list (first, ops_->fedora_main_extrareq ());
+ }
+ }
+
+ // Note that we need to add the %description directive regardless if
+ // the main package is being generated or not.
+ //
+ if (!binless)
+ {
+ os << '\n'
+ << "%description" << '\n'
+ << "This package contains the runtime files." << '\n';
+ }
+ else
+ {
+ os << '\n'
+ << "%description" << '\n'
+ << "This package contains the development files." << '\n';
+ }
+ }
+
+ // The -devel sub-package.
+ //
+ if (!st.devel.empty ())
+ {
+ os << '\n'
+ << "# " << st.devel << '\n'
+ << "#" << '\n'
+ << "%package -n " << st.devel << '\n'
+ << "Summary: " << pm.summary << '\n';
+
+ // Feels like the architecture should be the same as for the main
+ // package.
+ //
+ if (!build_arch.empty ())
+ os << "BuildArch: " << build_arch << '\n';
+
+ os << '\n'
+ << "AutoReqProv: no" << '\n';
+
+ // Requires directives.
+ //
+ {
+ bool first (true);
+
+ // Dependency on the main package.
+ //
+ if (gen_main)
+ add_requires (first, "%{name}" + isa + " = %{evr}");
+
+ for (const package_status& s: sdeps)
+ {
+ // Doesn't look like we can distinguish between interface and
+ // implementation dependencies here. So better to over- than
+ // under-specify.
+ //
+ // Note that if the -devel sub-package doesn't exist for a
+ // dependency, then its potential content may be part of the main
+ // package. If that's the case we, strictly speaking, should add
+ // the dependency on the main package. Let's, however, skip that
+ // since we already have this dependency implicitly via our own
+ // main package, which the -devel sub-package depends on.
+ //
+ if (!s.devel.empty ())
+ add_requires (first, s.devel + " >= " + s.system_version);
+ }
+
+ add_lang_requires (first,
+ "-devel",
+ ops_->fedora_devel_langreq (),
+ true /* intf_only */);
+
+ if (ops_->fedora_devel_extrareq_specified ())
+ add_requires_list (first, ops_->fedora_devel_extrareq ());
+ }
+
+ // If the -static sub-package is not being generated but there are
+ // some static libraries installed, then they will be added to the
+ // -devel sub-package. If that's the case, we add the
+ // `Provides: %{name}-static` directive for the -devel sub-package, as
+ // recommended.
+ //
+ // Should we do the same for the main package, where the static
+ // libraries go if the -devel sub-package is not being generated
+ // either? While it feels sensible, we've never seen such a practice
+ // or recommendation. So let's not do it for now.
+ //
+ if (st.static_.empty ())
+ {
+ for (auto p (ies.find_sub (libdir)); p.first != p.second; ++p.first)
+ {
+ const path& f (p.first->first);
+ path l (f.leaf (libdir));
+ const string& n (l.string ());
+
+ if (l.simple () &&
+ n.size () > 3 && n.compare (0, 3, "lib") == 0 &&
+ l.extension () == "a")
+ {
+ os << '\n'
+ << "Provides: %{name}-static" << isa << " = %{evr}" << '\n';
+
+ break;
+ }
+ }
+ }
+
+ os << '\n'
+ << "%description -n " << st.devel << '\n'
+ << "This package contains the development files." << '\n';
+ }
+
+ // The -static sub-package.
+ //
+ if (!st.static_.empty ())
+ {
+ os << '\n'
+ << "# " << st.static_ << '\n'
+ << "#" << '\n'
+ << "%package -n " << st.static_ << '\n'
+ << "Summary: " << pm.summary << '\n';
+
+ // Feels like the architecture should be the same as for the -devel
+ // sub-package.
+ //
+ if (!build_arch.empty ())
+ os << "BuildArch: " << build_arch << '\n';
+
+ os << '\n'
+ << "AutoReqProv: no" << '\n';
+
+ // Requires directives.
+ //
+ {
+ bool first (true);
+
+ // The static libraries without headers don't seem to be of any use.
+ // Thus, add a dependency on the -devel sub-package or, if that is not
+ // being generated, on the main package.
+ //
+ // Note that if there is no -devel package, then this cannot be a
+ // binless library and thus the main package is being generated.
+ //
+ add_requires (
+ first,
+ (!st.devel.empty () ? st.devel : "%{name}") + isa + " = %{evr}");
+
+ // Add dependency on sub-packages that may contain static libraries.
+ // Note that in the -devel case we can potentially over-specify the
+ // dependency which is better than to under-specify.
+ //
+ for (const package_status& s: sdeps)
+ {
+ // Note that if the -static sub-package doesn't exist for a
+ // dependency, then its potential content may be part of the
+ // -devel sub-package, if exists, or the main package otherwise.
+ // If that's the case we, strictly speaking, should add the
+ // dependency on the -devel sub-package, if exists, or the main
+ // package otherwise. Let's, however, also consider the implicit
+ // dependencies via our own -devel and main (sub-)packages, which
+ // we depend on, and simplify things similar to what we do for the
+ // -devel sub-package above.
+ //
+ // Also note that we only refer to the dependency's -devel
+ // sub-package if we don't have our own -devel sub-package
+ // (unlikely, but possible), which would provide us with such an
+ // implicit dependency.
+ //
+ const string& p (!s.static_.empty () ? s.static_ :
+ st.devel.empty () ? s.devel :
+ empty_string);
+
+ if (!p.empty ())
+ add_requires (first, p + " >= " + st.system_version);
+ }
+
+ add_lang_requires (first, "-static", ops_->fedora_stat_langreq ());
+
+ if (ops_->fedora_stat_extrareq_specified ())
+ add_requires_list (first, ops_->fedora_stat_extrareq ());
+ }
+
+ os << '\n'
+ << "%description -n " << st.static_ << '\n'
+ << "This package contains the static libraries." << '\n';
+ }
+
+ // The -doc sub-package.
+ //
+ if (!st.doc.empty ())
+ {
+ os << '\n'
+ << "# " << st.doc << '\n'
+ << "#" << '\n'
+ << "%package -n " << st.doc << '\n'
+ << "Summary: " << pm.summary << '\n'
+ << "BuildArch: noarch" << '\n'
+ << '\n'
+ << "AutoReqProv: no" << '\n'
+ << '\n'
+ << "%description -n " << st.doc << '\n'
+ << "This package contains the documentation." << '\n';
+ }
+
+ // The -common sub-package.
+ //
+ if (!st.common.empty ())
+ {
+ // Generally, this sub-package is not necessarily architecture-
+ // independent (for example, it could contain something shared between
+ // multiple binary packages produced from the same source package
+ // rather than something shared between all the architectures of a
+ // binary package). But seeing that we always generate one binary
+ // package, for us it only makes sense as architecture-independent.
+ //
+ // It's also not clear what dependencies we can deduce for this
+ // sub-package. Assuming that it depends on all the dependency -common
+ // sub-packages is probably unreasonable.
+ //
+ os << '\n'
+ << "# " << st.common << '\n'
+ << "#" << '\n'
+ << "%package -n " << st.common << '\n'
+ << "Summary: " << pm.summary << '\n'
+ << "BuildArch: noarch" << '\n'
+ << '\n'
+ << "AutoReqProv: no" << '\n'
+ << '\n'
+ << "%description -n " << st.common << '\n'
+ << "This package contains the architecture-independent files." << '\n';
+ }
+
+ // Build setup.
+ //
+ {
+ bool lang_c (lang ("c"));
+ bool lang_cxx (lang ("c++"));
+ bool lang_cc (lang ("cc"));
+
+ os << '\n'
+ << "# Build setup." << '\n'
+ << "#" << '\n';
+
+ // The -debuginfo and -debugsource sub-packages.
+ //
+ // Note that the -debuginfo and -debugsource sub-packages are defined
+ // in the spec file by expanding the %{debug_package} macro (search
+ // the macro definition in `rpm --showrc` stdout for details). This
+ // expansion happens as part of the %install section processing but
+ // only if the %{buildsubdir} macro is defined. This macro refers to
+ // the package source subdirectory in the ~/rpmbuild/BUILD directory
+ // and is normally set by the %setup macro expansion in the %prep
+ // section which, in particular, extracts source files from an
+ // archive, defines the %{buildsubdir} macro, and makes this directory
+ // current. Since we don't have an archive to extract, we will use the
+ // %setup macro disabling sources extraction (-T) and creating an
+ // empty source directory instead (-c). This directory is also used by
+ // rpmbuild for saving debuginfo-related intermediate files
+ // (debugfiles.list, etc). See "Fedora Debuginfo packages" and "Using
+ // RPM build flags" documentation for better understanding what's
+ // going on under the hood. There is also the "[Rpm-ecosystem] Trying
+ // to understand %buildsubdir and debuginfo generation" mailing list
+ // thread which provides some additional clarifications.
+ //
+ // Also note that we disable generating the -debugsource sub-packages
+ // (see the generate() description above for the reasoning).
+ //
+ // For a binless library no -debug* packages are supposed to be
+ // generated. Thus, we just drop their definitions by redefining the
+ // %{debug_package} macro as an empty string.
+ //
+ if (!binless)
+ {
+ os << "%undefine _debugsource_packages" << '\n';
+
+ // Append the -ffile-prefix-map option which is normally used to
+ // strip source file path prefix in debug information (besides other
+ // places). By default it is not used since rpmbuild replaces the
+ // recognized prefixes by using the debugedit program (see below for
+ // details) and we cannot rely on that since in our case the prefix
+ // (bpkg configuration directory) is not recognized. We need to
+ // replace the bpkg configuration directory prefix in the source
+ // file paths with the destination directory where the -debugsource
+ // sub-package files would be installed (if we were to generate it).
+ // For example:
+ //
+ // /usr/src/debug/foo-1.0.0-1.fc35.x86_64
+ //
+ // There is just one complication:
+ //
+ // While the generation of the -debugsource sub-packages is
+ // currently disabled, the find-debuginfo script executed by
+ // rpmbuild still performs some preparations for them. It runs the
+ // debugedit program, which, in particular, reads the source file
+ // paths from the debug information in package binaries and saves
+ // those which start with the ~/rpmbuild/BUILD/foo-1.0.0 or
+ // /usr/src/debug/foo-1.0.0-1.fc35.x86_64 directory prefix into the
+ // ~/rpmbuild/BUILD/foo-1.0.0/debugsources.list file, stripping the
+ // prefixes. It also saves all the relative source file paths as is
+ // (presumably assuming they are sub-entries of the
+ // ~/rpmbuild/BUILD/foo-1.0.0 directory where the package archive
+ // would normally be extracted). Afterwards, the content of the
+ // debugsources.list file is piped as an input to the cpio program
+ // executed in the ~/rpmbuild/BUILD/foo-1.0.0 directory as its
+ // current working directory, which tries to copy these source files
+ // to the
+ // ~/rpmbuild/BUILDROOT/foo-1.0.0-1.fc35.x86_64/usr/src/debug/foo-1.0.0-1.fc35.x86_64
+ // directory. Given that these source files are actually located in
+ // the bpkg configuration directory rather than in the
+ // ~/rpmbuild/BUILD/foo-1.0.0 directory the cpio program fails to
+ // stat them and complains. To work around that we need to change
+ // the replacement directory path in the -ffile-prefix-map option
+        // value to some other absolute (not necessarily existing) path
+ // which is not a subdirectory of the directory prefixes recognized
+ // by the debugedit program. This way debugedit won't recognize any
+ // of the package source files, will create an empty
+ // debugsources.list file, and thus the cpio program won't try to
+ // copy anything. Not to confuse the user (who can potentially see
+ // such paths in gdb while examining a core file produced by the
+ // package binary), we will keep this replacement directory path
+ // close to the desired one, but will also make it clear that the
+ // path is bogus:
+ //
+ // /usr/src/debug/bogus/foo-1.0.0-1.fc35.x86_64
+ //
+ // Note that this path mapping won't work for external packages with
+ // source out of configuration (e.g., managed by bdep).
+ //
+ // @@ Supposedly this code won't be necessary when we add support
+ // for -debugsource sub-packages somehow. Feels like one way
+ // would be to make ~/rpmbuild/BUILD/foo-1.0.0 a symlink to the
+ // bpkg configuration (or to the primary package inside, if not
+ // --recursive).
+ //
+ if (ops_->fedora_buildflags () != "ignore")
+ {
+ const char* debugsource_dir (
+ "%{_usrsrc}/debug/bogus/%{name}-%{evr}.%{_arch}");
+
+ if (lang_c || lang_cc)
+ os << "%global build_cflags %{?build_cflags} -ffile-prefix-map="
+ << cfg_dir.string () << '=' << debugsource_dir << '\n';
+
+ if (lang_cxx || lang_cc)
+ os << "%global build_cxxflags %{?build_cxxflags} -ffile-prefix-map="
+ << cfg_dir.string () << '=' << debugsource_dir << '\n';
+ }
+ }
+ else
+ os << "%global debug_package %{nil}" << '\n';
+
+ // Common arguments for build2 commands.
+ //
+ // Let's use absolute path to the build system driver in case we are
+ // invoked with altered environment or some such.
+ //
+ // Note: should be consistent with the invocation in installed_entries()
+ // above.
+ //
+ cstrings verb_args; string verb_arg;
+ map_verb_b (*ops_, verb_b::normal, verb_args, verb_arg);
+
+ os << '\n'
+ << "%global build2 " << search_b (*ops_).effect_string ();
+ for (const char* o: verb_args) os << ' ' << o;
+ for (const string& o: ops_->build_option ()) os << ' ' << o;
+
+ // Map the %{_smp_build_ncpus} macro value to the build2 --jobs or
+ // --serial-stop options.
+ //
+ os << '\n'
+ << '\n'
+ << "%if %{defined _smp_build_ncpus}" << '\n'
+ << " %if %{_smp_build_ncpus} == 1" << '\n'
+ << " %global build2 %{build2} --serial-stop" << '\n'
+ << " %else" << '\n'
+ << " %global build2 %{build2} --jobs=%{_smp_build_ncpus}" << '\n'
+ << " %endif" << '\n'
+ << "%endif" << '\n';
+
+ // Configuration variables.
+ //
+ // Note: we need to quote values that contain `<>`, `[]`, since they
+ // will be passed through shell. For simplicity, let's just quote
+ // everything.
+ //
+ os << '\n'
+ << "%global config_vars";
+
+ auto add_macro_line = [&os] (const auto& v)
+ {
+ os << " \\\\\\\n " << v;
+ };
+
+ add_macro_line ("config.install.chroot='%{buildroot}/'");
+ add_macro_line ("config.install.sudo='[null]'");
+
+ // If this is a C-based language, add rpath for private installation.
+ //
+ if (priv && (lang_c || lang_cxx || lang_cc))
+ add_macro_line ("config.bin.rpath='%{_libdir}/" + pn.string () + "/'");
+
+ // Add build flags.
+ //
+ if (ops_->fedora_buildflags () != "ignore")
+ {
+ const string& m (ops_->fedora_buildflags ());
+
+ string o (m == "assign" ? "=" :
+ m == "append" ? "+=" :
+ m == "prepend" ? "=+" : "");
+
+ if (o.empty ())
+ fail << "unknown --fedora-buildflags option value '" << m << "'";
+
+ // Note that config.cc.* doesn't play well with the append/prepend
+ // modes because the orders are:
+ //
+ // x.poptions cc.poptions
+ // cc.coptions x.coptions
+ // cc.loptions x.loptions
+ //
+ // Oh, well, hopefully it will be close enough for most cases.
+ //
+ // Note also that there are compiler mode options that are not
+ // overridden. Also the preprocessor options are normally contained
+ // in the %{build_cflags} and %{build_cxxflags} macro definitions
+ // and have no separate macros associated at this level (see "Using
+ // RPM build flags" documentation for details). For example:
+ //
+ // $ rpm --eval "%{build_cflags}"
+ // -O2 -flto=auto -ffat-lto-objects -fexceptions -g
+ // -grecord-gcc-switches -pipe -Wall -Werror=format-security
+ // -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS
+ // -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1
+ // -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1
+ // -m64 -mtune=generic -fasynchronous-unwind-tables
+ // -fstack-clash-protection -fcf-protection
+ //
+ // Note the -Wp options above. Thus, we reset config.{c,cxx}.poptions
+ // to [null] in the assign mode and, for simplicity, leave them as
+ // configured otherwise. We could potentially fix that either by
+ // extracting the -Wp,... options from %{build_cflags} and
+ // %{build_cxxflags} macro values or using more lower level macros
+ // instead (%{_preprocessor_defines}, %{_hardened_cflags}, etc),
+ // which all feels quite hairy and brittle.
+ //
+ if (o == "=" && (lang_c || lang_cxx || lang_cc))
+ {
+ add_macro_line ("config.cc.poptions='[null]'");
+ add_macro_line ("config.cc.coptions='[null]'");
+ add_macro_line ("config.cc.loptions='[null]'");
+ }
+
+ if (lang_c || lang_cc)
+ {
+ if (o == "=")
+ add_macro_line ("config.c.poptions='[null]'");
+
+ add_macro_line ("config.c.coptions" + o + "'%{?build_cflags}'");
+ add_macro_line ("config.c.loptions" + o + "'%{?build_ldflags}'");
+ }
+
+ if (lang_cxx || lang_cc)
+ {
+ if (o == "=")
+ add_macro_line ("config.cxx.poptions='[null]'");
+
+ add_macro_line ("config.cxx.coptions" + o + "'%{?build_cxxflags}'");
+ add_macro_line ("config.cxx.loptions" + o + "'%{?build_ldflags}'");
+ }
+ }
+
+ // Keep last to allow user-specified configuration variables to
+ // override anything.
+ //
+ for (const string& c: config)
+ {
+      // Quote the value unless already quoted (see above). Presence of
+ // potentially-quoted user variables complicates things a bit (can
+ // be partially quoted, double-quoted, etc).
+ //
+ size_t p (c.find_first_of ("=+ \t")); // End of name.
+ if (p != string::npos)
+ {
+ p = c.find_first_not_of ("=+ \t", p); // Beginning of value.
+ if (p != string::npos)
+ {
+ if (c.find_first_of ("'\"", p) == string::npos) // Not quoted.
+ {
+ add_macro_line (string (c, 0, p) + '\'' + string (c, p) + '\'');
+ continue;
+ }
+ }
+ }
+
+ add_macro_line (c);
+ }
+
+ os << '\n'; // Close the macro definition.
+
+ // List of packages we need to install.
+ //
+ os << '\n'
+ << "%global packages";
+
+ for (const package& p: pkgs)
+ add_macro_line (p.out_root.representation ());
+
+ os << '\n'; // Close the macro definition.
+ }
+
+ // Build sections.
+ //
+ {
+ os << '\n'
+ << "# Build sections." << '\n'
+ << "#" << '\n'
+ << "%prep" << '\n'
+ << "%setup -T -c" << '\n'
+ << '\n'
+ << "%build" << '\n'
+ << "%{build2} %{config_vars} update-for-install: %{packages}" << '\n'
+ << '\n'
+ << "%install" << '\n'
+ << "%{build2} %{config_vars} '!config.install.scope=" << scope
+ << "' install: %{packages}" << '\n';
+ }
+
+ // Files sections.
+ //
+ // Generate the %files section for each sub-package in order to sort out
+ // which files belong where.
+ //
+ // For the details on the %files section directives see "Directives For
+ // the %files list" documentation. But the summary is:
+ //
+ // - Supports only simple wildcards (?, *, [...]; no recursive/**).
+ // - Includes directories recursively, unless the path is prefixed
+ // with the %dir directive, in which case only includes this
+ // directory entry, which will be created on install and removed on
+ // uninstall, if empty.
+ // - An entry that doesn't match anything is an error (say,
+ // /usr/sbin/*).
+ //
+ // Keep in mind that wherever there is <project> in the config.install.*
+ // variable, we can end up with multiple different directories (bundled
+ // packages).
+ //
+ {
+ string main;
+ string devel;
+ string static_;
+ string doc;
+ string common;
+
+ // Note that declaring package ownership for standard directories is
+ // considered in Fedora a bad idea and is reported as an error by some
+ // RPM package checking tools (rpmlint, etc). Thus, we generate, for
+ // example, libexecdir/* entry rather than libexecdir/. However, if
+ // the private directory is specified we generate libexecdir/<private>/
+ // to own the directory.
+ //
+ // NOTE: use consistently with the above install directory expressions
+ // (%{?_includedir}, etc) evaluation.
+ //
+ string pd (priv ? pn.string () + '/' : "");
+
+      // The main package contains everything that doesn't go to other
+      // packages.
+ //
+ if (gen_main)
+ {
+ if (ies.contains_sub (bindir)) main += "%{_bindir}/*\n";
+ if (ies.contains_sub (sbindir)) main += "%{_sbindir}/*\n";
+
+ if (ies.contains_sub (libexecdir))
+ main += "%{_libexecdir}/" + (priv ? pd : "*") + '\n';
+
+ // This could potentially go to -common but it could also be target-
+ // specific, who knows. So let's keep it in main for now.
+ //
+ // Let's also specify that the confdir/ sub-entries are
+        // non-replaceable configuration files. This, in particular, means
+ // that if edited they will not be replaced/removed on the package
+ // upgrade or uninstallation (see RPM Packaging Guide for more
+ // details on the %config(noreplace) directive). Also note that the
+ // binary package configuration files can later be queried by the
+ // user via the `rpm --query --configfiles` command.
+ //
+ if (ies.contains_sub (confdir))
+ main += "%config(noreplace) %{_sysconfdir}/*\n";
+ }
+
+ if (ies.contains_sub (incdir))
+ (!st.devel.empty () ? devel : main) +=
+ "%{_includedir}/" + (priv ? pd : "*") + '\n';
+
+ if (st.devel.empty () && st.static_.empty ())
+ {
+ assert (gen_main); // Shouldn't be here otherwise.
+
+ if (ies.contains_sub (libdir))
+ main += "%{_libdir}/" + (priv ? pd : "*") + '\n';
+ }
+ else
+ {
+ // Ok, time for things to get hairy: we need to split the contents
+ // of lib/ into the main, -devel, and/or -static sub-packages. The
+ // -devel sub-package, if present, should contain three things:
+ //
+ // 1. Static libraries (.a), if no -static sub-package is present.
+ // 2. Non-versioned shared library symlinks (.so).
+ // 3. Contents of the pkgconfig/ subdirectory, except for *.static.pc
+ // files if -static sub-package is present.
+ //
+ // The -static sub-package, if present, should contain two things:
+ //
+ // 1. Static libraries (.a).
+ // 2. *.static.pc files in pkgconfig/ subdirectory.
+ //
+ // Everything else should go into the main package.
+ //
+ // The shared libraries are tricky. Here we can have three plausible
+ // arrangements:
+ //
+ // A. Portably-versioned library:
+ //
+ // libfoo-1.2.so
+ // libfoo.so -> libfoo-1.2.so
+ //
+ // B. Natively-versioned library:
+ //
+ // libfoo.so.1.2.3
+ // libfoo.so.1.2 -> libfoo.so.1.2.3
+ // libfoo.so.1 -> libfoo.so.1.2
+ // libfoo.so -> libfoo.so.1
+ //
+ // C. Non-versioned library:
+ //
+ // libfoo.so
+ //
+ // Note that in the (C) case the library should go into the main
+ // package. Based on this, the criteria appears to be
+ // straightforward: the extension is .so and it's a symlink. For
+ // good measure we also check that there is the `lib` prefix
+ // (plugins, etc).
+ //
+ // Also note that if <private>/ is specified, then to establish
+ // ownership of the libdir/<private>/ directory we also need to add
+ // it non-recursively to one of the potentially 3 sub-packages,
+ // which all can contain some of its sub-entries. Not doing this
+ // will result in an empty libdir/<private>/ subdirectory after the
+ // binary package uninstallation. Naturally, the owner should be the
+ // right-most sub-package on the following diagram which contains
+ // any of the libdir/<private>/ sub-entries:
+ //
+ // -static -> -devel -> main
+ //
+ // The same reasoning applies to libdir/<private>/pkgconfig/.
+ //
+ string* owners[] = {&static_, &devel, &main};
+
+ // Indexes (in owners) of sub-packages which should own
+ // libdir/<private>/ and libdir/<private>/pkgconfig/. If nullopt,
+ // then no additional directory ownership entry needs to be added
+ // (installation is not private, recursive directory entry is
+ // already added, etc).
+ //
+ optional<size_t> private_owner;
+ optional<size_t> pkgconfig_owner;
+
+ for (auto p (ies.find_sub (libdir)); p.first != p.second; )
+ {
+ const path& f (p.first->first);
+ const installed_entry& ie ((p.first++)->second);
+
+ path l (f.leaf (libdir));
+ const string& n (l.string ());
+ string* fs (&main); // Go to the main package as a last resort.
+
+ auto update_ownership = [&owners, &fs] (optional<size_t>& pi)
+ {
+ size_t i (0);
+ for (; owners[i] != fs; ++i) ;
+
+ if (!pi || *pi < i)
+ pi = i;
+ };
+
+ if (l.simple ())
+ {
+ if (n.size () > 3 && n.compare (0, 3, "lib") == 0)
+ {
+ string e (l.extension ());
+
+ if (e == "a")
+ {
+ fs = !st.static_.empty () ? &static_ : &devel;
+ }
+ else if (e == "so" && ie.target != nullptr &&
+ !st.devel.empty ())
+ {
+ fs = &devel;
+ }
+ }
+
+ *fs += "%{_libdir}/" + pd + n + '\n';
+ }
+ else
+ {
+ // Let's keep things tidy and, when possible, use a
+ // sub-directory rather than listing all its sub-entries
+ // verbatim.
+ //
+ dir_path sd (*l.begin ());
+ dir_path d (libdir / sd);
+
+ if (d == pkgdir)
+ {
+ // If the -static sub-package is not being generated, then the
+ // whole directory goes into the -devel sub-package.
+ // Otherwise, *.static.pc files go into the -static
+ // sub-package and the rest into the -devel sub-package,
+ // unless it is not being generated in which case it goes into
+ // the main package.
+ //
+ if (!st.static_.empty ())
+ {
+ if (n.size () > 10 &&
+ n.compare (n.size () - 10, 10, ".static.pc") == 0)
+ fs = &static_;
+ else if (!st.devel.empty ())
+ fs = &devel;
+
+ *fs += "%{_libdir}/" + pd + n;
+
+ // Update the index of a sub-package which should own
+ // libdir/<private>/pkgconfig/.
+ //
+ if (priv)
+ update_ownership (pkgconfig_owner);
+ }
+ else
+ {
+ fs = &devel;
+ *fs += "%{_libdir}/" + pd + sd.string () + (priv ? "/" : "/*");
+ }
+ }
+ else
+ *fs += "%{_libdir}/" + pd + sd.string () + '/';
+
+ // In the case of the directory (has the trailing slash) or
+ // wildcard (has the trailing asterisk) skip all the other
+ // entries in this subdirectory (in the prefix map they will all
+ // be in a contiguous range).
+ //
+ char c (fs->back ());
+
+ if (c == '/' || c == '*')
+ {
+ while (p.first != p.second && p.first->first.sub (d))
+ ++p.first;
+ }
+
+ *fs += '\n';
+ }
+
+ // We can only add files to the main package if we generate it.
+ //
+ assert (fs != &main || gen_main);
+
+ // Update the index of a sub-package which should own
+ // libdir/<private>/.
+ //
+ if (priv)
+ update_ownership (private_owner);
+ }
+
+ // Add the directory ownership entries.
+ //
+ if (private_owner)
+ *owners[*private_owner] += "%dir %{_libdir}/" + pd + '\n';
+
+ if (pkgconfig_owner)
+ *owners[*pkgconfig_owner] +=
+ "%dir %{_libdir}/" + pd + "pkgconfig/" + '\n';
+ }
+
+ // We cannot just do usr/share/* since it will clash with doc/, man/,
+ // and licenses/ below. So we have to list all the top-level entries
+ // in usr/share/ that are not doc/, man/, licenses/, or build2/.
+ //
+ if (gen_main)
+ {
+ // Note that if <private>/ is specified, then we also need to
+ // establish ownership of the sharedir/<private>/ directory (similar
+ // to what we do for libdir/<private>/ above).
+ //
+ string* private_owner (nullptr);
+
+ string& fs (!st.common.empty () ? common : main);
+
+ for (auto p (ies.find_sub (sharedir)); p.first != p.second; )
+ {
+ const path& f ((p.first++)->first);
+
+ if (f.sub (docdir) ||
+ f.sub (mandir) ||
+ f.sub (licensedir) ||
+ f.sub (build2dir))
+ continue;
+
+ path l (f.leaf (sharedir));
+
+ if (l.simple ())
+ {
+ fs += "%{_datadir}/" + pd + l.string () + '\n';
+ }
+ else
+ {
+ // Let's keep things tidy and use a sub-directory rather than
+ // listing all its sub-entries verbatim.
+ //
+ dir_path sd (*l.begin ());
+
+ fs += "%{_datadir}/" + pd + sd.string () + '/' + '\n';
+
+ // Skip all the other entries in this subdirectory (in the prefix
+ // map they will all be in a contiguous range).
+ //
+ dir_path d (sharedir / sd);
+ while (p.first != p.second && p.first->first.sub (d))
+ ++p.first;
+ }
+
+ // Indicate that we need to establish ownership of
+ // sharedir/<private>/.
+ //
+ if (priv)
+ private_owner = &fs;
+ }
+
+ // Add the directory ownership entry.
+ //
+ if (private_owner != nullptr)
+ *private_owner += "%dir %{_datadir}/" + pd + '\n';
+ }
+
+ // Note that we only consider the bfdir/<project>/* sub-entries,
+ // adding the bfdir/<project>/ subdirectories to the %files
+ // section. This way no additional directory ownership entry needs to
+ // be added. Any immediate sub-entries of bfdir/, if present, will be
+ // ignored, which will end up with the 'unpackaged files' rpmbuild
+ // error.
+ //
+ // Also note that the bfdir/ directory is not owned by any package.
+ //
+ if (gen_main)
+ {
+ for (auto p (ies.find_sub (bfdir)); p.first != p.second; )
+ {
+ const path& f ((p.first++)->first);
+
+ path l (f.leaf (bfdir));
+
+ if (!l.simple ())
+ {
+ // Let's keep things tidy and use a sub-directory rather than
+ // listing all its sub-entries verbatim.
+ //
+ dir_path sd (*l.begin ());
+
+ main += "%{_datadir}/build2/export/" + sd.string () + '/' + '\n';
+
+ // Skip all the other entries in this subdirectory (in the
+ // prefix map they will all be in a contiguous range).
+ //
+ dir_path d (bfdir / sd);
+ while (p.first != p.second && p.first->first.sub (d))
+ ++p.first;
+ }
+ }
+ }
+
+ // Should we put the documentation into -common if there is no -doc?
+ // While there doesn't seem to be anything explicit in the policy,
+ // there are packages that do it this way (e.g., mariadb-common). And
+ // the same logic seems to apply to -devel (e.g., zlib-devel).
+ //
+ {
+ string& fs (!st.doc.empty () ? doc :
+ !st.common.empty () ? common :
+ !st.devel.empty () ? devel :
+ main);
+
+ // We can only add doc files to the main or -common packages if we
+ // generate the main package.
+ //
+ assert ((&fs != &main && &fs != &common) || gen_main);
+
+ // Let's specify that the docdir/ sub-entries are documentation
+ // files. Note that the binary package documentation files can later
+ // be queried by the user via the `rpm --query --docfiles` command.
+ //
+ if (ies.contains_sub (docdir))
+ fs += "%doc %{_docdir}/" + (priv ? pd : "*") + '\n';
+
+ // Since the man file may not appear directly in the man/
+        // subdirectory we use the man/*/* wildcard rather than man/* so as
+        // not to declare ownership of standard directories.
+ //
+ // As a side note, rpmbuild compresses the man files in the
+ // installation directory, which needs to be taken into account if
+ // writing more specific wildcards (e.g., %{_mandir}/man1/foo.1*).
+ //
+ if (ies.contains_sub (mandir))
+ fs += "%{_mandir}/*/*\n";
+ }
+
+ // Let's specify that the licensedir/ sub-entries are license files.
+ // Note that the binary package license files can later be queried by
+ // the user via the `rpm --query --licensefiles` command.
+ //
+ if (ies.contains_sub (licensedir))
+ (gen_main ? main : devel) +=
+ "%license %{_licensedir}/" + (priv ? pd : "*") + '\n';
+
+ // Finally, write the %files sections.
+ //
+ if (!main.empty ())
+ {
+ assert (gen_main); // Shouldn't be here otherwise.
+
+ os << '\n'
+ << "# " << st.main << " files." << '\n'
+ << "#" << '\n'
+ << "%files" << '\n'
+ << main;
+ }
+
+ if (!devel.empty ())
+ {
+ os << '\n'
+ << "# " << st.devel << " files." << '\n'
+ << "#" << '\n'
+ << "%files -n " << st.devel << '\n'
+ << devel;
+ }
+
+ if (!static_.empty ())
+ {
+ os << '\n'
+ << "# " << st.static_ << " files." << '\n'
+ << "#" << '\n'
+ << "%files -n " << st.static_ << '\n'
+ << static_;
+ }
+
+ if (!doc.empty ())
+ {
+ os << '\n'
+ << "# " << st.doc << " files." << '\n'
+ << "#" << '\n'
+ << "%files -n " << st.doc << '\n'
+ << doc;
+ }
+
+ if (!common.empty ())
+ {
+ os << '\n'
+ << "# " << st.common << " files." << '\n'
+ << "#" << '\n'
+ << "%files -n " << st.common << '\n'
+ << common;
+ }
+ }
+
+ // Changelog section.
+ //
+ // The section entry has the following format:
+ //
+ // * <day-of-week> <month> <day> <year> <name> <surname> <email> - <version>-<release>
+ // - <change1-description>
+ // - <change2-description>
+ // ...
+ //
+ // For example:
+ //
+ // * Wed Feb 22 2023 John Doe <john@example.com> - 2.3.4-1
+ // - New bpkg package release 2.3.4.
+ //
+ // We will use the Packager value for the `<name> <surname> <email>`
+ // fields. Strictly speaking it may not exactly match the fields set but
+ // it doesn't seem to break anything if that's the case. For good
+    // measure, we will also use the English locale for the date.
+ //
+ // Note that the <release> field doesn't contain the distribution tag.
+ //
+ {
+ os << '\n'
+ << "%changelog" << '\n'
+ << "* ";
+
+      // Given that we don't include the timezone there is not much sense
+      // in printing the current time as local.
+ //
+ std::locale l (os.imbue (std::locale ("C")));
+ to_stream (os,
+ system_clock::now (),
+ "%a %b %d %Y",
+ false /* special */,
+ false /* local */);
+ os.imbue (l);
+
+ os << ' ' << packager << " - ";
+
+ if (!sys_version.epoch.empty ())
+ os << sys_version.epoch << ':';
+
+ os << sys_version.version << '-' << sys_version.release << '\n'
+ << "- New bpkg package release " << pvs << '.' << '\n';
+ }
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << spec << ": " << e;
+ }
+
+ // Run rpmbuild.
+ //
+ // Note that rpmbuild causes recompilation periodically by setting the
+ // SOURCE_DATE_EPOCH environment variable (which we track for changes
+ // since it affects GCC). Its value depends on the timestamp of the latest
+ // change log entry and thus has a day resolution. Note that since we
+ // don't have this SOURCE_DATE_EPOCH during dry-run caused by
+ // installed_entries(), there would be a recompilation even if the value
+ // weren't changing.
+ //
+ cstrings args {"rpmbuild", "-bb"}; // Only build binary packages.
+
+ // Map our verbosity to rpmbuild --quiet and -vv options (-v is the
+ // default). Note that there doesn't seem to be any way to control its
+ // progress.
+ //
+ // Also note that even in the quiet mode rpmbuild may still print some
+ // progress lines.
+ //
+ if (verb == 0)
+ args.push_back ("--quiet");
+ else if (verb >= 4) // Note that -vv feels too verbose for level 3.
+ args.push_back ("-vv");
+
+ // If requested, keep the installation directory, etc.
+ //
+ if (ops_->keep_output ())
+ args.push_back ("--noclean");
+
+ // Pass our --jobs value, if any.
+ //
+ string jobs_arg;
+ if (size_t n = ops_->jobs_specified () ? ops_->jobs () : 0)
+ {
+ jobs_arg = "--define=_smp_build_ncpus " + to_string (n);
+ args.push_back (jobs_arg.c_str ());
+ }
+
+ // Pass the rpmbuild/rpm common options.
+ //
+ for (const string& o: common_opts)
+ args.push_back (o.c_str ());
+
+ // Pass any additional options specified by the user.
+ //
+ for (const string& o: ops_->fedora_build_option ())
+ args.push_back (o.c_str ());
+
+ args.push_back (spec.string ().c_str ());
+ args.push_back (nullptr);
+
+ if (ops_->fedora_prepare_only ())
+ {
+ if (verb >= 1)
+ {
+ diag_record dr (text);
+
+ dr << "prepared " << spec <<
+ text << "command line: ";
+
+ print_process (dr, args);
+ }
+
+ return binary_files {};
+ }
+
+ try
+ {
+ process_path pp (process::path_search (args[0]));
+ process_env pe (pp);
+
+ // There is going to be quite a bit of diagnostics so print the command
+ // line unless quiet.
+ //
+ if (verb >= 1)
+ print_process (pe, args);
+
+ // Redirect stdout to stderr since some of rpmbuild diagnostics goes
+ // there. For good measure also redirect stdin to /dev/null to make sure
+ // there are no prompts of any kind.
+ //
+ process pr (pp, args, -2 /* stdin */, 2 /* stdout */, 2 /* stderr */);
+
+ if (!pr.wait ())
+ {
+ // Let's repeat the command line even if it was printed at the
+ // beginning to save the user a rummage through the logs.
+ //
+ diag_record dr (fail);
+ dr << args[0] << " exited with non-zero code" <<
+ info << "command line: "; print_process (dr, pe, args);
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ // While it's tempting to always keep the spec file let's remove it,
+ // unless requested not to, since it contains absolute paths to
+ // configuration.
+ //
+ if (!ops_->keep_output ())
+ rm (spec);
+
+ // Collect and return the binary sub-package paths.
+ //
+ // Here we will use `rpm --eval` to resolve the RPM sub-package paths.
+ //
+ binary_files r;
+ r.system_version = st.system_version;
+ {
+ string expressions;
+
+ auto add_macro = [&expressions] (const string& name, const string& value)
+ {
+ expressions += "%global " + name + ' ' + value + '\n';
+ };
+
+ add_macro ("VERSION", sys_version.version);
+ add_macro ("RELEASE", sys_version.release + "%{?dist}");
+
+ const string& package_arch (!build_arch.empty () ? build_arch : arch);
+
+ vector<binary_file> files;
+
+ auto add_package = [&files, &expressions, &rpmfile, &add_macro]
+ (const string& name,
+ const string& arch,
+ const char* type) -> size_t
+ {
+ add_macro ("NAME", name);
+ add_macro ("ARCH", arch);
+ expressions += rpmfile + '\n';
+
+ // Note: path is unknown yet.
+ //
+ files.push_back (binary_file {type, path (), name});
+ return files.size () - 1;
+ };
+
+ if (gen_main)
+ add_package (st.main, package_arch, "main.rpm");
+
+ if (!st.devel.empty ())
+ add_package (st.devel, package_arch, "devel.rpm");
+
+ if (!st.static_.empty ())
+ add_package (st.static_, package_arch, "static.rpm");
+
+ if (!st.doc.empty ())
+ add_package (st.doc, "noarch", "doc.rpm");
+
+ if (!st.common.empty ())
+ add_package (st.common, "noarch", "common.rpm");
+
+ optional<size_t> di (
+ !binless
+ ? add_package (st.main + "-debuginfo", arch, "debuginfo.rpm")
+ : optional<size_t> ());
+
+ // Strip the trailing newline since rpm adds one.
+ //
+ expressions.pop_back ();
+
+ strings expansions (eval (cstrings ({expressions.c_str ()})));
+
+ if (expansions.size () != files.size ())
+ fail << "number of RPM file path expansions differs from number "
+ << "of path expressions";
+
+ for (size_t i (0); i != files.size(); ++i)
+ {
+ try
+ {
+ path p (move (expansions[i]));
+
+ if (p.empty ())
+ throw invalid_path ("");
+
+ // Note that the -debuginfo sub-package may potentially not be
+ // generated (no installed binaries to extract the debug info from,
+ // etc).
+ //
+ if (exists (p))
+ {
+ binary_file& f (files[i]);
+
+ r.push_back (
+ binary_file {move (f.type), move (p), move (f.system_name)});
+ }
+ else if (!di || i != *di) // Not a -debuginfo sub-package?
+ fail << "expected output file " << p << " does not exist";
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path '" << e.path << "' in RPM file path expansions";
+ }
+ }
+ }
+
+ return r;
+ }
+}
diff --git a/bpkg/system-package-manager-fedora.hxx b/bpkg/system-package-manager-fedora.hxx
new file mode 100644
index 0000000..3e68b98
--- /dev/null
+++ b/bpkg/system-package-manager-fedora.hxx
@@ -0,0 +1,372 @@
+// file : bpkg/system-package-manager-fedora.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_SYSTEM_PACKAGE_MANAGER_FEDORA_HXX
+#define BPKG_SYSTEM_PACKAGE_MANAGER_FEDORA_HXX
+
+#include <map>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <bpkg/system-package-manager.hxx>
+
+namespace bpkg
+{
+ // The system package manager implementation for Fedora and alike (Red Hat
+ // Enterprise Linux, CentOS, etc) using the DNF frontend.
+ //
+ // NOTE: THE BELOW DESCRIPTION IS ALSO REPRODUCED IN THE BPKG MANUAL.
+ //
+ // For background, a library in Fedora is normally split up into several
+ // packages: the shared library package (e.g., libfoo), the development
+ // files package (e.g., libfoo-devel), the static library package (e.g.,
+ // libfoo-static; may also be placed into the -devel package), the
+ // documentation files package (e.g., libfoo-doc), the debug symbols and
+ // source files packages (e.g., libfoo-debuginfo and libfoo-debugsource),
+ // and the common or architecture-independent files (e.g., libfoo-common).
+ // All the packages except -devel are optional and there is quite a bit of
+ // variability here. In particular, the lib prefix in libfoo is not a
+ // requirement (unlike in Debian) and is normally present only if upstream
+ // name has it (see some examples below).
+ //
+ // For mixed packages which include both applications and libraries, the
+ // shared library package normally has the -libs suffix (e.g., foo-libs).
+ // Such packages may have separate -debuginfo packages for applications and
+ // libraries (e.g. openssl-debuginfo and openssl-libs-debuginfo).
+ //
+ // A package name may also include an upstream version based suffix if
+ // multiple versions of the package can be installed simultaneously (e.g.,
+ // libfoo1.1 libfoo1.1-devel, libfoo2 libfoo2-devel).
+ //
+ // Terminology-wise, the term "base package" (sometime also "main package")
+ // normally refers to either the application or shared library package (as
+ // decided by the package maintainer in the spec file) with the suffixed
+ // packages (-devel, -doc, etc) called "subpackages".
+ //
+ // Here are a few examples:
+ //
+ // libpq libpq-devel
+ //
+ // zlib zlib-devel zlib-static
+ //
+ // catch-devel
+ //
+ // eigen3-devel eigen3-doc
+ //
+ // xerces-c xerces-c-devel xerces-c-doc
+ //
+ // libsigc++20 libsigc++20-devel libsigc++20-doc
+ // libsigc++30 libsigc++30-devel libsigc++30-doc
+ //
+ // icu libicu libicu-devel libicu-doc
+ //
+ // openssl openssl-libs openssl-devel openssl-static
+ // openssl1.1 openssl1.1-devel
+ //
+ // curl libcurl libcurl-devel
+ //
+ // sqlite sqlite-libs sqlite-devel sqlite-doc
+ //
+ // community-mysql community-mysql-libs community-mysql-devel
+ // community-mysql-common community-mysql-server
+ //
+ // ncurses ncurses-libs ncurses-c++-libs ncurses-devel ncurses-static
+ //
+ // keyutils keyutils-libs keyutils-libs-devel
+ //
+ // Note that while we support arbitrary -debug* sub-package names for
+ // consumption, we only generate <main-package>-debug*.
+ //
+ // Based on that, it seems our best bet when trying to automatically map our
+ // library package name to Fedora package names is to go for the -devel
+ // package first and figure out the shared library package from that based
+ // on the fact that the -devel package should have the == dependency on the
+ // shared library package with the same version and its name should normally
+ // start with the -devel package's stem and be potentially followed with the
+ // -libs suffix. Failing to find the -devel package, we may re-try but now
+ // using the project name instead of the package name (see, for example,
+ // openssl, sqlite).
+ //
+ // For application packages there is normally no -devel packages but
+ // -debug*, -doc, and -common are plausible.
+ //
+ // The format of the fedora-name (or alike) manifest value is a comma-
+ // separated list of one or more package groups:
+ //
+ // <package-group> [, <package-group>...]
+ //
+ // Where each <package-group> is the space-separated list of one or more
+ // package names:
+ //
+ // <package-name> [ <package-name>...]
+ //
+ // All the packages in the group should belong to the same "logical
+ // package", such as -devel, -doc, -common packages. They normally have the
+ // same version.
+ //
+ // The first group is called the main group and the first package in the
+ // group is called the main package. Note that all the groups are consumed
+ // (installed) but only the main group is produced (packaged).
+ //
+ // (Note that above we use the term "logical package" instead of "base
+ // package" since the main package may not be the base package, for example
+ // being the -libs subpackage.)
+ //
+ // We allow/recommend specifying the -devel package instead of the main
+ // package for libraries (the bpkg package name starts with lib), seeing
+ // that we are capable of detecting the main package automatically. If the
+ // library name happens to end with -devel (which poses an ambiguity), then
+ // the -devel package should be specified explicitly as the second package
+ // to disambiguate this situation (if a non-library name happened to start
+ // with lib and end with -devel, well, you are out of luck, I guess).
+ //
+ // Note also that for now we treat all the packages from the non-main groups
+ // as extras but in the future we may decide to sort them out like the main
+ // group (see parse_name_value() for details).
+ //
+ // The Fedora package version has the [<epoch>:]<version>-<release> form
+ // where the parts correspond to the Epoch (optional upstream versioning
+ // scheme), Version (upstream version), and Release (Fedora's package
+ // revision) RPM tags (see the Fedora Package Versioning Guidelines and RPM
+ // tags documentation for details). If no explicit mapping to the bpkg
+ // version is specified with the fedora-to-downstream-version (or alike)
+ // manifest values or none match, then we fallback to using the <version>
+ // part as the bpkg version. If explicit mapping is specified, then we match
+ // it against the [<epoch>:]<version> parts ignoring <release>.
+ //
+ struct system_package_status_fedora: system_package_status
+ {
+ string main;
+ string devel;
+ string static_;
+ string doc;
+ string debuginfo;
+ string debugsource;
+ string common;
+ strings extras;
+
+ string fallback; // Fallback for main/devel package based on project name.
+
+ // The `dnf list` output.
+ //
+ struct package_info
+ {
+ string name;
+ string installed_version; // Empty if none.
+ string candidate_version; // Empty if none and no installed_version.
+
+ // The architecture of the installed/candidate package version. Can only
+ // be the host architecture or noarch (so it could have been bool but
+ // it's more convenient to have the actual name).
+ //
+ // Note that in Fedora the same package version can be available for
+ // multiple architectures or be architecture-independent. For example:
+ //
+ // dbus-libs-1:1.12.22-1.fc35.i686
+ // dbus-libs-1:1.12.22-1.fc35.x86_64
+ // dbus-common-1:1.12.22-1.fc35.noarch
+ // code-insiders-1.75.0-1675123170.el7.armv7hl
+ // code-insiders-1.75.0-1675123170.el7.aarch64
+ // code-insiders-1.75.0-1675123170.el7.x86_64
+ //
+ // Thus, for a package query we normally need to qualify the package
+ // with the architecture suffix or filter the query result, normally
+ // skipping packages for architectures other than the host architecture.
+ //
+ string installed_arch;
+ string candidate_arch;
+
+ explicit
+ package_info (string n): name (move (n)) {}
+ };
+
+ vector<package_info> package_infos;
+ size_t package_infos_main = 0; // Size of the main group.
+
+ explicit
+ system_package_status_fedora (string m, string d = {}, string f = {})
+ : main (move (m)), devel (move (d)), fallback (move (f))
+ {
+ assert (!main.empty () || !devel.empty ());
+ }
+
+ system_package_status_fedora () = default;
+ };
+
+ class system_package_manager_fedora: public system_package_manager
+ {
+ public:
+ virtual optional<const system_package_status*>
+ status (const package_name&, const available_packages*) override;
+
+ virtual void
+ install (const vector<package_name>&) override;
+
+ virtual binary_files
+ generate (const packages&,
+ const packages&,
+ const strings&,
+ const dir_path&,
+ const package_manifest&,
+ const string&,
+ const small_vector<language, 1>&,
+ optional<bool>,
+ bool) override;
+
+ public:
+ // Expect os_release::name_id to be "fedora" or os_release::like_ids to
+ // contain "fedora".
+ //
+ system_package_manager_fedora (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ optional<size_t> fetch_timeout,
+ bool install,
+ bool fetch,
+ bool yes,
+ string sudo)
+ : system_package_manager (move (osr),
+ h,
+ a.empty () ? arch_from_target (h) : move (a),
+ progress,
+ fetch_timeout,
+ install,
+ fetch,
+ yes,
+ move (sudo)) {}
+
+ // Note: options can only be NULL when testing functions that don't need
+ // them.
+ //
+ system_package_manager_fedora (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ const pkg_bindist_options* ops)
+ : system_package_manager (move (osr),
+ h,
+ a.empty () ? arch_from_target (h) : move (a),
+ progress),
+ ops_ (ops) {}
+
+ // Implementation details exposed for testing (see definitions for
+ // documentation).
+ //
+ public:
+ using package_status = system_package_status_fedora;
+ using package_info = package_status::package_info;
+
+ void
+ dnf_list (vector<package_info>&, size_t = 0);
+
+ vector<pair<string, string>>
+ dnf_repoquery_requires (const string&, const string&, const string&, bool);
+
+ void
+ dnf_makecache ();
+
+ void
+ dnf_install (const strings&);
+
+ void
+ dnf_mark_install (const strings&);
+
+ pair<cstrings, const process_path&>
+ dnf_common (const char*,
+ optional<size_t> fetch_timeout,
+ strings& args_storage);
+
+ static package_status
+ parse_name_value (const string&, const string&, bool, bool, bool);
+
+ static string
+ main_from_devel (const string&,
+ const string&,
+ const vector<pair<string, string>>&);
+
+ static string
+ arch_from_target (const target_triplet&);
+
+ package_status
+ map_package (const package_name&,
+ const version&,
+ const available_packages&) const;
+
+ static strings
+ rpm_eval (const cstrings& opts, const cstrings& expressions);
+
+ // If simulate is not NULL, then instead of executing the actual dnf
+ // commands simulate their execution: (1) for `dnf list` and `dnf
+ // repoquery --requires` by printing their command lines and reading the
+ // results from files specified in the below dnf_* maps and (2) for `dnf
+ // makecache`, `dnf install`, and `dnf mark install` by printing their
+ // command lines and failing if requested.
+ //
+ // In the (1) case if the corresponding map entry does not exist or the
+ // path is empty, then act as if the specified package/version is
+ // unknown. If the path is special "-" then read from stdin. For `dnf
+ // list` and `dnf repoquery --requires` different post-fetch and (for the
+ // former) post-install results can be specified (if the result is not
+ // found in one of the later maps, the previous map is used as a
+ // fallback). Note that the keys in the dnf_list_* maps are the package
+ // sets and the corresponding result file is expected to contain (or not)
+ // the results for all of them. See dnf_list() and
+ // dnf_repoquery_requires() implementations for details on the expected
+ // results.
+ //
+ struct simulation
+ {
+ std::map<strings, path> dnf_list_;
+ std::map<strings, path> dnf_list_fetched_;
+ std::map<strings, path> dnf_list_installed_;
+
+ struct package
+ {
+ string name;
+ string version;
+ string arch;
+ bool installed;
+
+ bool
+ operator< (const package& p) const
+ {
+ if (int r = name.compare (p.name))
+ return r < 0;
+
+ if (int r = version.compare (p.version))
+ return r < 0;
+
+ if (int r = arch.compare (p.arch))
+ return r < 0;
+
+ return installed < p.installed;
+ }
+ };
+
+ std::map<package, path> dnf_repoquery_requires_;
+ std::map<package, path> dnf_repoquery_requires_fetched_;
+
+ bool dnf_makecache_fail_ = false;
+ bool dnf_install_fail_ = false;
+ bool dnf_mark_install_fail_ = false;
+ };
+
+ const simulation* simulate_ = nullptr;
+
+ private:
+ optional<system_package_status_fedora>
+ status (const package_name&, const available_packages&);
+
+ private:
+ bool fetched_ = false; // True if already fetched metadata.
+ bool installed_ = false; // True if already installed.
+
+ std::map<package_name, optional<system_package_status_fedora>> status_cache_;
+
+ const pkg_bindist_options* ops_ = nullptr; // Only for production.
+ };
+}
+
+#endif // BPKG_SYSTEM_PACKAGE_MANAGER_FEDORA_HXX
diff --git a/bpkg/system-package-manager-fedora.test.cxx b/bpkg/system-package-manager-fedora.test.cxx
new file mode 100644
index 0000000..4e59da1
--- /dev/null
+++ b/bpkg/system-package-manager-fedora.test.cxx
@@ -0,0 +1,431 @@
+// file : bpkg/system-package-manager-fedora.test.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager-fedora.hxx>
+
+#include <map>
+#include <iostream>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#undef NDEBUG
+#include <cassert>
+
+#include <bpkg/system-package-manager.test.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ using package_status = system_package_status_fedora;
+ using package_info_ = package_status::package_info;
+ using package = system_package_manager_fedora::simulation::package;
+
+ using butl::manifest_parser;
+ using butl::manifest_parsing;
+
+ // Usage: args[0] <command> ...
+ //
+ // Where <command> is one of:
+ //
+ // dnf-list <pkg>... result comes from stdin
+ //
+ // dnf-repoquery-requires <pkg> <ver> <arc> result comes from stdin
+ //
+ // parse-name-value <pkg> fedora-name value from stdin
+ //
+ // main-from-devel <dev-pkg> <dev-ver> depends comes from stdin in
+ // the `<dep-pkg> <dep-ver>`
+ // per line form
+ //
+ // map-package manifest comes from stdin
+ //
+ // build <query-pkg>... [--install [--no-fetch] <install-pkg>...]
+ //
+ // The stdin of the build command is used to read the simulation description
+ // which consists of lines in the following forms (blanks are ignored):
+ //
+ // manifest: <query-pkg> <file>
+ //
+ // Available package manifest for one of <query-pkg>. If none is
+ // specified, then a stub is automatically added.
+ //
+ // dnf-list[-{fetched,installed}]: <sys-pkg>... <file>
+ //
+ // Values for simulation::dnf_list_*. If <file> is the special `!` value,
+ // then make the entry empty.
+ //
+ // dnf-repoquery-requires[-fetched]: <sys-pkg> <sys-ver> <sys-arch> <file>
+ //
+ // Values for simulation::dnf_repoquery_requires_*. If <file> is the
+ // special `!` value, then make the entry empty.
+ //
+ // dnf-makecache-fail: true
+ // dnf-install-fail: true
+ // dnf-mark-install-fail: true
+ //
+ // Values for simulation::dnf_{makecache,install,mark_install}_fail_.
+ //
+ // While creating the system package manager always pretend to be the x86_64
+ // Fedora host (x86_64-redhat-linux-gnu), regardless of the actual host
+ // platform.
+ //
+ int
+ main (int argc, char* argv[])
+ try
+ {
+ assert (argc >= 2); // <command>
+
+ target_triplet host_triplet ("x86_64-redhat-linux-gnu");
+
+ string cmd (argv[1]);
+
+ // @@ TODO: add option to customize? Maybe option before command?
+ //
+ os_release osr {"fedora", {}, "35", "", "Fedora Linux", "", ""};
+
+ auto to_bool = [] (const string& s)
+ {
+ assert (s == "true" || s == "false");
+ return s == "true";
+ };
+
+ if (cmd == "dnf-list")
+ {
+ assert (argc >= 3); // <pkg>...
+
+ strings key;
+ vector<package_info_> pis;
+ for (int i (2); i != argc; ++i)
+ {
+ key.push_back (argv[i]);
+ pis.push_back (package_info_ (argv[i]));
+ }
+
+ system_package_manager_fedora::simulation s;
+ s.dnf_list_.emplace (move (key), path ("-"));
+
+ system_package_manager_fedora m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ false /* install */,
+ false /* fetch */,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ m.dnf_list (pis);
+
+ for (const package_info_& pi: pis)
+ {
+ cout << pi.name << " '"
+ << pi.installed_version << "' '"
+ << pi.installed_arch << "' '"
+ << pi.candidate_version << "' '"
+ << pi.candidate_arch << "'\n";
+ }
+ }
+ else if (cmd == "dnf-repoquery-requires")
+ {
+ assert (argc == 6); // <pkg> <ver> <arch> <installed>
+
+ package key {argv[2], argv[3], argv[4], to_bool (argv[5])};
+
+ system_package_manager_fedora::simulation s;
+ s.dnf_repoquery_requires_.emplace (key, path ("-"));
+
+ system_package_manager_fedora m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ false /* install */,
+ false /* fetch */,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ for (const pair<string, string>& d:
+ m.dnf_repoquery_requires (key.name,
+ key.version,
+ key.arch,
+ key.installed))
+ {
+ cout << d.first << ' ' << d.second << '\n';
+ }
+ }
+ else if (cmd == "parse-name-value")
+ {
+ assert (argc == 3); // <pkg>
+
+ package_name pn (argv[2]);
+ string pt (package_manifest::effective_type (nullopt, pn));
+
+ string v;
+ getline (cin, v);
+
+ package_status s (
+ system_package_manager_fedora::parse_name_value (
+ pt, v, false, false, false));
+
+ if (!s.main.empty ()) cout << "main: " << s.main << '\n';
+ if (!s.devel.empty ()) cout << "devel: " << s.devel << '\n';
+ if (!s.static_.empty ()) cout << "static: " << s.static_ << '\n';
+ if (!s.doc.empty ()) cout << "doc: " << s.doc << '\n';
+ if (!s.debuginfo.empty ()) cout << "debuginfo: " << s.debuginfo << '\n';
+ if (!s.debugsource.empty ()) cout << "debugsource: " << s.debugsource << '\n';
+ if (!s.common.empty ()) cout << "common: " << s.common << '\n';
+ if (!s.extras.empty ())
+ {
+ cout << "extras:";
+ for (const string& e: s.extras)
+ cout << ' ' << e;
+ cout << '\n';
+ }
+ }
+ else if (cmd == "main-from-devel")
+ {
+ assert (argc == 4); // <dev-pkg> <dev-ver>
+
+ string n (argv[2]);
+ string v (argv[3]);
+ vector<pair<string, string>> ds;
+
+ for (string l; !eof (getline (cin, l)); )
+ {
+ size_t p (l.find (' '));
+ assert (p != string::npos);
+
+ ds.emplace_back (string (l, 0, p), string (l, p + 1));
+ }
+
+ cout << system_package_manager_fedora::main_from_devel (n, v, ds) << '\n';
+ }
+ else if (cmd == "map-package")
+ {
+ assert (argc == 2);
+
+ available_packages aps;
+ aps.push_back (make_available_from_manifest ("", "-"));
+
+ const package_name& n (aps.front ().first->id.name);
+ const version& v (aps.front ().first->version);
+
+ system_package_manager_fedora m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullptr /* options */);
+
+ package_status s (m.map_package (n, v, aps));
+
+ cout << "version: " << s.system_version << '\n'
+ << "main: " << s.main << '\n';
+ if (!s.devel.empty ()) cout << "devel: " << s.devel << '\n';
+ if (!s.static_.empty ()) cout << "static: " << s.static_ << '\n';
+ if (!s.doc.empty ()) cout << "doc: " << s.doc << '\n';
+ if (!s.debuginfo.empty ()) cout << "debuginfo: " << s.debuginfo << '\n';
+ if (!s.debugsource.empty ()) cout << "debugsource: " << s.debugsource << '\n';
+ if (!s.common.empty ()) cout << "common: " << s.common << '\n';
+ }
+ else if (cmd == "build")
+ {
+ assert (argc >= 3); // <query-pkg>...
+
+ strings qps;
+ map<string, available_packages> aps;
+
+ // Parse <query-pkg>...
+ //
+ int argi (2);
+ for (; argi != argc; ++argi)
+ {
+ string a (argv[argi]);
+
+ if (a.compare (0, 2, "--") == 0)
+ break;
+
+ aps.emplace (a, available_packages {});
+ qps.push_back (move (a));
+ }
+
+ // Parse --install [--no-fetch]
+ //
+ bool install (false);
+ bool fetch (true);
+
+ for (; argi != argc; ++argi)
+ {
+ string a (argv[argi]);
+
+ if (a == "--install") install = true;
+ else if (a == "--no-fetch") fetch = false;
+ else break;
+ }
+
+ // Parse the description.
+ //
+ system_package_manager_fedora::simulation s;
+
+ for (string l; !eof (getline (cin, l)); )
+ {
+ if (l.empty ())
+ continue;
+
+ size_t p (l.find (':')); assert (p != string::npos);
+ string k (l, 0, p);
+
+ if (k == "manifest")
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ auto i (aps.find (n));
+ if (i == aps.end ())
+ fail << "unknown package " << n << " in '" << l << "'";
+
+ i->second.push_back (make_available_from_manifest (n, f));
+ }
+ else if (
+ map<strings, path>* infos =
+ k == "dnf-list" ? &s.dnf_list_ :
+ k == "dnf-list-fetched" ? &s.dnf_list_fetched_ :
+ k == "dnf-list-installed" ? &s.dnf_list_installed_ :
+ nullptr)
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ strings ns;
+ for (size_t b (0), e (0); next_word (n, b, e); )
+ ns.push_back (string (n, b, e - b));
+
+ if (f == "!")
+ f.clear ();
+
+ infos->emplace (move (ns), path (move (f)));
+ }
+ else if (map<package, path>* req =
+ k == "dnf-repoquery-requires" ? &s.dnf_repoquery_requires_ :
+ k == "dnf-repoquery-requires-fetched" ? &s.dnf_repoquery_requires_fetched_ :
+ nullptr)
+ {
+ size_t q (l.rfind (' ')); assert (q != string::npos);
+ string n (l, p + 2, q - p - 2); trim (n);
+ string f (l, q + 1); trim (f);
+
+ q = n.rfind (' '); assert (q != string::npos);
+ bool i (to_bool (string (n, q + 1)));
+ n.resize (q);
+
+ q = n.rfind (' '); assert (q != string::npos);
+ string a (n, q + 1);
+ n.resize (q);
+
+ q = n.find (' '); assert (q != string::npos);
+
+ package pkg {string (n, 0, q), string (n, q + 1), move (a), i};
+
+ if (f == "!")
+ f.clear ();
+
+ req->emplace (move (pkg), path (move (f)));
+ }
+ else if (k == "dnf-makecache-fail")
+ {
+ s.dnf_makecache_fail_ = true;
+ }
+ else if (k == "dnf-install-fail")
+ {
+ s.dnf_install_fail_ = true;
+ }
+ else if (k == "dnf-mark-install-fail")
+ {
+ s.dnf_mark_install_fail_ = true;
+ }
+ else
+ fail << "unknown keyword '" << k << "' in simulation description";
+ }
+
+ // Fallback to stubs and sort in the version descending order.
+ //
+ for (pair<const string, available_packages>& p: aps)
+ {
+ if (p.second.empty ())
+ p.second.push_back (make_available_stub (p.first));
+
+ sort_available (p.second);
+ }
+
+ system_package_manager_fedora m (move (osr),
+ host_triplet,
+ "" /* arch */,
+ nullopt /* progress */,
+ nullopt /* fetch_timeout */,
+ install,
+ fetch,
+ false /* yes */,
+ "sudo");
+ m.simulate_ = &s;
+
+ // Query each package.
+ //
+ for (const string& n: qps)
+ {
+ package_name pn (n);
+
+ const system_package_status* s (*m.status (pn, &aps[n]));
+
+ assert (*m.status (pn, nullptr) == s); // Test caching.
+
+ if (s == nullptr)
+ fail << "no installed " << (install ? "or available " : "")
+ << "system package for " << pn;
+
+ cout << pn << ' ' << s->version
+ << " (" << s->system_name << ' ' << s->system_version << ") ";
+
+ switch (s->status)
+ {
+ case package_status::installed: cout << "installed"; break;
+ case package_status::partially_installed: cout << "part installed"; break;
+ case package_status::not_installed: cout << "not installed"; break;
+ }
+
+ cout << '\n';
+ }
+
+ // Install if requested.
+ //
+ if (install)
+ {
+ assert (argi != argc); // <install-pkg>...
+
+ vector<package_name> ips;
+ for (; argi != argc; ++argi)
+ ips.push_back (package_name (argv[argi]));
+
+ m.install (ips);
+ }
+ }
+ else
+ fail << "unknown command '" << cmd << "'";
+
+ return 0;
+ }
+ catch (const failed&)
+ {
+ return 1;
+ }
+}
+
+int
+main (int argc, char* argv[])
+{
+ return bpkg::main (argc, argv);
+}
diff --git a/bpkg/system-package-manager-fedora.test.testscript b/bpkg/system-package-manager-fedora.test.testscript
new file mode 100644
index 0000000..b1d5b8c
--- /dev/null
+++ b/bpkg/system-package-manager-fedora.test.testscript
@@ -0,0 +1,1410 @@
+# file : bpkg/system-package-manager-fedora.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: dnf-list
+:
+{
+ test.arguments += dnf-list
+
+ : basics
+ :
+ $* openssl-libs openssl-devel openssl1.1 openssl1.1-devel libsigc++40 libcurl lrmi rust-uuid+std-devel <<EOI 2>>EOE >>EOO
+ Installed Packages
+ libcurl.i686 7.79.1-5.fc35 @updates
+ libcurl.x86_64 7.79.1-5.fc35 @updates
+ openssl-devel.x86_64 1:1.1.1q-1.fc35 @updates
+ openssl-libs.i686 1:1.1.1q-1.fc35 @updates
+ openssl-libs.x86_64 1:1.1.1q-1.fc35 @updates
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ libcurl.i686 7.79.1-7.fc35 updates
+ libcurl.x86_64 7.79.1-7.fc35 updates
+ lrmi.i686 0.10-28.fc35 fedora
+ openssl-devel.i686 1:1.1.1q-1.fc35 updates
+ openssl1.1.i686 1:1.1.1i-3.fc35 fedora
+ openssl1.1.x86_64 1:1.1.1i-3.fc35 fedora
+ openssl1.1-devel.i686 1:1.1.1i-3.fc35 fedora
+ openssl1.1-devel.x86_64 1:1.1.1i-3.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ rust-uuid+std-devel.noarch 1.2.1-1.fc35 updates
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet openssl-libs openssl-devel openssl1.1 openssl1.1-devel libsigc++40 libcurl lrmi rust-uuid+std-devel rpm <-
+ EOE
+ openssl-libs '1:1.1.1q-1.fc35' 'x86_64' '1:1.1.1q-1.fc35' 'x86_64'
+ openssl-devel '1:1.1.1q-1.fc35' 'x86_64' '1:1.1.1q-1.fc35' 'x86_64'
+ openssl1.1 '' '' '1:1.1.1i-3.fc35' 'x86_64'
+ openssl1.1-devel '' '' '1:1.1.1i-3.fc35' 'x86_64'
+ libsigc++40 '' '' '' ''
+ libcurl '7.79.1-5.fc35' 'x86_64' '7.79.1-7.fc35' 'x86_64'
+ lrmi '' '' '' ''
+ rust-uuid+std-devel '' '' '1.2.1-1.fc35' 'noarch'
+ EOO
+
+ : unknown
+ :
+ $* libsigc++40 <<EOI 2>>EOE >>EOO
+ Installed Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ Available Packages
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++40 rpm <-
+ EOE
+ libsigc++40 '' '' '' ''
+ EOO
+
+ : non-host-arc
+ :
+ $* lrmi <<EOI 2>>EOE >>EOO
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ lrmi.i686 0.10-28.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet lrmi rpm <-
+ EOE
+ lrmi '' '' '' ''
+ EOO
+
+ : dnf
+ :
+ $* rpm <<EOI 2>>EOE >>EOO
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet rpm rpm <-
+ EOE
+ rpm '4.17.1-2.fc35' 'x86_64' '4.17.1-3.fc35' 'x86_64'
+ EOO
+
+ : dnf-not-exist
+ :
+ $* openssl-libs <<EOI 2>>EOE != 0
+ Installed Packages
+ openssl-libs.i686 1:1.1.1q-1.fc35 @updates
+ openssl-libs.x86_64 1:1.1.1q-1.fc35 @updates
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet openssl-libs rpm <-
+ error: rpm package doesn't exist
+ EOE
+}
+
+: dnf-repoquery-requires
+:
+{
+ test.arguments += dnf-repoquery-requires
+
+ : basics
+ :
+ $* openssl-devel '1:1.1.1q-1.fc35' x86_64 true <<EOI 2>>EOE >>EOO
+ opae-devel x86_64 0:2.0.0-2.3.fc35
+ openssl-devel i686 1:1.1.1q-1.fc35
+ openssl-devel x86_64 1:1.1.1q-1.fc35
+ openssl-libs x86_64 1:1.1.1q-1.fc35
+ openssl1.1 x86_64 1:1.1.1i-3.fc35
+ openssl1.1-devel i686 1:1.1.1i-3.fc35
+ openssl1.1-devel x86_64 1:1.1.1i-3.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all openssl-devel-1:1.1.1q-1.fc35.x86_64 <-
+ EOE
+ opae-devel 2.0.0-2.3.fc35
+ openssl-libs 1:1.1.1q-1.fc35
+ openssl1.1 1:1.1.1i-3.fc35
+ openssl1.1-devel 1:1.1.1i-3.fc35
+ pkgconf-pkg-config 1.8.0-1.fc35
+ EOO
+
+ : no-arch
+ :
+ $* rust-uuid+std-devel 1.2.1-1.fc35 noarch false <<EOI 2>>EOE >>EOO
+ cargo x86_64 0:1.65.0-1.fc35
+ rust-uuid-devel noarch 0:1.2.1-1.fc35
+ EOI
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" rust-uuid+std-devel-1.2.1-1.fc35.noarch <-
+ EOE
+ cargo 1.65.0-1.fc35
+ rust-uuid-devel 1.2.1-1.fc35
+ EOO
+
+ : no-arch-dependency
+ :
+ $* dhcp-client '12:4.4.3-4.P1.fc35' x86_64 true <<EOI 2>>EOE >>EOO
+ bash i686 0:5.1.8-3.fc35
+ bash x86_64 0:5.1.8-3.fc35
+ coreutils x86_64 0:8.32-36.fc35
+ coreutils-single x86_64 0:8.32-36.fc35
+ dhcp-common noarch 12:4.4.3-4.P1.fc35
+ gawk i686 0:5.1.0-4.fc35
+ gawk x86_64 0:5.1.0-4.fc35
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ grep x86_64 0:3.6-4.fc35
+ ipcalc x86_64 0:1.0.1-2.fc35
+ iproute x86_64 0:5.13.0-2.fc35
+ iputils x86_64 0:20210722-1.fc35
+ libcap-ng x86_64 0:0.8.2-8.fc35
+ sed x86_64 0:4.8-8.fc35
+ systemd i686 0:249.13-6.fc35
+ systemd x86_64 0:249.13-6.fc35
+ EOI
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all dhcp-client-12:4.4.3-4.P1.fc35.x86_64 <-
+ EOE
+ bash 5.1.8-3.fc35
+ coreutils 8.32-36.fc35
+ coreutils-single 8.32-36.fc35
+ dhcp-common 12:4.4.3-4.P1.fc35
+ gawk 5.1.0-4.fc35
+ glibc 2.34-49.fc35
+ grep 3.6-4.fc35
+ ipcalc 1.0.1-2.fc35
+ iproute 5.13.0-2.fc35
+ iputils 20210722-1.fc35
+ libcap-ng 0.8.2-8.fc35
+ sed 4.8-8.fc35
+ systemd 249.13-6.fc35
+ EOO
+
+ : no-depends
+ :
+ $* glibc 2.34-38.fc35 x86_64 true <:'' 2>>EOE >:''
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all glibc-2.34-38.fc35.x86_64 <-
+ EOE
+
+ : unknown
+ :
+ $* glibg 2.34-38.fc35 x86_64 false <:'' 2>>EOE >:''
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" glibg-2.34-38.fc35.x86_64 <-
+ EOE
+}
+
+: parse-name-value
+:
+{
+ test.arguments += parse-name-value
+
+ : basics
+ :
+ $* libmysqlclient <<EOI >>EOO
+ community-mysql-libs community-mysql-devel community-mysql-common community-mysql-libs-debuginfo community-mysql-debugsource community-mysql-extras, libstdc++ libstdc++-devel libstdc++-docs libstdc++-static, libz-dev
+ EOI
+ main: community-mysql-libs
+ devel: community-mysql-devel
+ debuginfo: community-mysql-libs-debuginfo
+ debugsource: community-mysql-debugsource
+ common: community-mysql-common
+ extras: community-mysql-extras libstdc++ libstdc++-devel libstdc++-static libstdc++-docs libz-dev
+ EOO
+
+ : non-lib
+ :
+ $* sqlite3 <<EOI >>EOO
+ sqlite sqlite-doc sqlite-analyzer sqlite-tools
+ EOI
+ main: sqlite
+ doc: sqlite-doc
+ extras: sqlite-analyzer sqlite-tools
+ EOO
+
+ : lib-devel
+ :
+ $* libsqlite3 <<EOI >>EOO
+ sqlite-devel
+ EOI
+ devel: sqlite-devel
+ EOO
+
+ : non-lib-devel
+ :
+ $* ssl-devel <<EOI >>EOO
+ ssl-devel
+ EOI
+ main: ssl-devel
+ EOO
+
+ : lib-custom-devel
+ :
+ $* libfoo-devel <<EOI >>EOO
+ libfoo-devel libfoo-devel-devel
+ EOI
+ main: libfoo-devel
+ devel: libfoo-devel-devel
+ EOO
+}
+
+: main-from-devel
+:
+{
+ test.arguments += main-from-devel
+
+ : libs
+ :
+ $* sqlite-devel 3.36.0-3.fc35 <<EOI >'sqlite-libs'
+ pkgconf-pkg-config 1.8.0-1.fc35
+ sqlite 3.36.0-3.fc35
+ sqlite-libs 3.36.0-3.fc35
+ EOI
+
+ : no-libs
+ :
+ $* xerces-c-devel 3.2.3-4.fc35 <<EOI >'xerces-c'
+ pkgconf-pkg-config 1.8.0-1.fc35
+ xerces-c 3.2.3-4.fc35
+ EOI
+
+ : no-dependencies
+ :
+ $* boost-http-server-devel 0-1.20220116gitcd5245f.fc35 <:'' >''
+}
+
+: map-package
+:
+{
+ test.arguments += map-package
+
+ : default-name
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: 20210808
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808-1
+ main: byacc
+ EOO
+
+ : default-name-lib
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-1
+ main: libsqlite3
+ devel: libsqlite3-devel
+ EOO
+
+ : custom-name
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ fedora_35-name: libsqlite3 libsqlite3-devel
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-1
+ main: libsqlite3
+ devel: libsqlite3-devel
+ EOO
+
+ : custom-name-dev-only
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ fedora_35-name: libsqlite3-devel
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-1
+ main: libsqlite3
+ devel: libsqlite3-devel
+ EOO
+
+ : custom-name-non-native
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: libsqlite3
+ fedora_0-name: libsqlite libsqlite-devel
+ fedor_35-name: libsqlite3 libsqlite3-devel
+ version: 3.40.1
+ summary: database library
+ license: other: public domain
+ EOI
+ version: 3.40.1-1
+ main: libsqlite
+ devel: libsqlite-devel
+ EOO
+
+ : version-upstream
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ upstream-version: 20210808
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-4
+ main: byacc
+ EOO
+
+ : version-distribution
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ fedora-version: 20210808~beta.1
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-1
+ main: byacc
+ EOO
+
+ : version-distribution-epoch-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ fedora-version: 1:1.2.3-2
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 1:1.2.3-2
+ main: byacc
+ EOO
+
+ : version-distribution-empty-prerelease
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ fedora-version: 20210808~-4
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-4
+ main: byacc
+ EOO
+
+ : version-distribution-empty-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ fedora-version: 20210808~b.1-
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~b.1-4
+ main: byacc
+ EOO
+
+ : version-distribution-empty-release-revision
+ :
+ $* <<EOI >>EOO
+ : 1
+ name: byacc
+ version: +2-1.2.3-beta.1+3
+ fedora-version: 20210808~-
+ summary: yacc parser generator
+ license: other: public domain
+ EOI
+ version: 20210808~beta.1-4
+ main: byacc
+ EOO
+}
+
+: build
+:
+{
+ test.arguments += build
+
+ : libpq
+ :
+ {
+ : installed
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq-devel.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.4-1.fc35 fedora
+ EOI
+ cat <<EOI >=libpq-devel.requires;
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ libpq x86_64 0:13.4-1.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=libpq.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ EOI
+ $* libpq --install libpq <<EOI 2>>EOE >>EOO
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ dnf-repoquery-requires: libpq-devel 13.4-1.fc35 x86_64 true libpq-devel.requires
+ dnf-list: libpq libpq.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all libpq-devel-13.4-1.fc35.x86_64 <libpq-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info
+ sudo dnf mark --quiet --assumeno install --cacheonly libpq-13.4-1.fc35.x86_64 libpq-devel-13.4-1.fc35.x86_64
+ EOE
+ libpq 13.4 (libpq 13.4-1.fc35) installed
+ EOO
+
+
+ : part-installed
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.4-1.fc35 fedora
+ libpq-devel.x86_64 13.4-1.fc35 fedora
+ EOI
+ cat <<EOI >=libpq-devel.requires;
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ libpq x86_64 0:13.4-1.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=libpq.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ EOI
+ $* libpq --install libpq <<EOI 2>>EOE >>EOO
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ dnf-repoquery-requires: libpq-devel 13.4-1.fc35 x86_64 false libpq-devel.requires
+ dnf-list: libpq libpq.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" libpq-devel-13.4-1.fc35.x86_64 <libpq-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info
+ sudo dnf install --quiet --assumeno libpq.x86_64 libpq-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly libpq.x86_64 libpq-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info
+ EOE
+ libpq 13.4 (libpq 13.4-1.fc35) part installed
+ EOO
+
+
+ : part-installed-upgrade
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.3-3.fc35 fedora
+ libpq-devel.x86_64 13.3-3.fc35 fedora
+ EOI
+ cat <<EOI >=libpq-devel+pq-devel.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.4-1.fc35 fedora
+ libpq-devel.x86_64 13.4-1.fc35 fedora
+ EOI
+ cat <<EOI >=libpq-devel.requires-fetched;
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ libpq x86_64 0:13.4-1.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=libpq.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.3-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ libpq.x86_64 13.4-1.fc35 @fedora
+ EOI
+ cat <<EOI >=libpq.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ EOI
+ $* libpq --install libpq <<EOI 2>>EOE >>EOO
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ dnf-list-fetched: libpq-devel pq-devel libpq-devel+pq-devel.info-fetched
+ dnf-repoquery-requires: libpq-devel 13.4-1.fc35 x86_64 false libpq-devel.requires-fetched
+ dnf-list-fetched: libpq libpq.info-fetched
+ dnf-list-installed: libpq libpq.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info-fetched
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" libpq-devel-13.4-1.fc35.x86_64 <libpq-devel.requires-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info-fetched
+ sudo dnf install --quiet --assumeno libpq.x86_64 libpq-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly libpq.x86_64 libpq-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info-installed
+ EOE
+ libpq 13.4 (libpq 13.4-1.fc35) part installed
+ EOO
+
+
+ # Note that these semantics are unrealistic (a background metadata update
+ # may have happened right before installing libpq). Also note that, unlike
+ # the part-installed-upgrade test, we operate in the --sys-no-fetch mode.
+ #
+ : part-installed-upgrade-version-change
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.3-3.fc35 fedora
+ libpq-devel.x86_64 13.3-3.fc35 fedora
+ EOI
+ cat <<EOI >=libpq-devel.requires;
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ libpq x86_64 0:13.3-3.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=libpq.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.3-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.3-3.fc35 fedora
+ EOI
+ cat <<EOI >=libpq.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ EOI
+ $* libpq --install --no-fetch libpq <<EOI 2>>EOE >>EOO != 0
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ dnf-repoquery-requires: libpq-devel 13.3-3.fc35 x86_64 false libpq-devel.requires
+ dnf-list: libpq libpq.info
+ dnf-list-installed: libpq libpq.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" libpq-devel-13.3-3.fc35.x86_64 <libpq-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info
+ sudo dnf install --quiet --assumeno libpq.x86_64 libpq-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly libpq.x86_64 libpq-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info-installed
+ error: unexpected fedora package version for libpq
+ info: expected: 13.3-3.fc35
+ info: installed: 13.4-1.fc35
+ info: consider retrying the bpkg command
+ EOE
+ libpq 13.3 (libpq 13.3-3.fc35) part installed
+ EOO
+
+
+ : not-installed
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.4-1.fc35 fedora
+ libpq-devel.x86_64 13.4-1.fc35 @fedora
+ EOI
+ cat <<EOI >=libpq-devel.requires;
+ glibc i686 0:2.34-49.fc35
+ glibc x86_64 0:2.34-49.fc35
+ libpq x86_64 0:13.4-1.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=libpq.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ libpq.x86_64 13.4-1.fc35 @fedora
+ EOI
+ cat <<EOI >=libpq.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libpq.x86_64 13.4-1.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq.i686 13.4-1.fc35 fedora
+ EOI
+ $* libpq --install libpq <<EOI 2>>EOE >>EOO
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ dnf-repoquery-requires: libpq-devel 13.4-1.fc35 x86_64 false libpq-devel.requires
+ dnf-list: libpq libpq.info
+ dnf-list-installed: libpq libpq.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" libpq-devel-13.4-1.fc35.x86_64 <libpq-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info
+ sudo dnf install --quiet --assumeno libpq.x86_64 libpq-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly libpq.x86_64 libpq-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq rpm <libpq.info-installed
+ EOE
+ libpq 13.4 (libpq 13.4-1.fc35) not installed
+ EOO
+
+
+ : no-install
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libpq-devel.i686 13.4-1.fc35 fedora
+ libpq-devel.x86_64 13.4-1.fc35 @fedora
+ EOI
+ $* libpq <<EOI 2>>EOE != 0
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ error: no installed system package for libpq
+ EOE
+
+
+ : not-available
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libpq --install libpq <<EOI 2>>EOE != 0
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ error: no installed or available system package for libpq
+ EOE
+
+
+ : not-available-no-fetch
+ :
+ cat <<EOI >=libpq-devel+pq-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libpq --install --no-fetch libpq <<EOI 2>>EOE != 0
+ dnf-list: libpq-devel pq-devel libpq-devel+pq-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libpq-devel pq-devel rpm <libpq-devel+pq-devel.info
+ error: no installed or available system package for libpq
+ EOE
+ }
+
+ : libsqlite3
+ :
+ {
+ +cat <<EOI >=libsqlite3.manifest
+ : 1
+ name: libsqlite3
+ version: 3.39.4+1
+ project: sqlite
+ summary: SQL database engine as an in-process C library
+ license: blessing ; SQLite Blessing.
+ EOI
+
+
+ : dev-resolve-fail
+ :
+ cat <<EOI >=libsqlite3-devel+sqlite3-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE != 0
+ dnf-list: libsqlite3-devel sqlite3-devel libsqlite3-devel+sqlite3-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsqlite3-devel sqlite3-devel rpm <libsqlite3-devel+sqlite3-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libsqlite3-devel sqlite3-devel rpm <libsqlite3-devel+sqlite3-devel.info
+ error: no installed or available system package for libsqlite3
+ EOE
+
+
+ : installed
+ :
+ : In particular test the project name-based resolution of the -devel
+ : package.
+ :
+ ln -s ../libsqlite3.manifest ./;
+ cat <<EOI >=libsqlite3-devel+sqlite-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ sqlite-devel.x86_64 3.36.0-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite-devel.i686 3.36.0-3.fc35 fedora
+ EOI
+ cat <<EOI >=sqlite-devel.requires;
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ sqlite x86_64 0:3.36.0-3.fc35
+ sqlite-libs x86_64 0:3.36.0-3.fc35
+ EOI
+ cat <<EOI >=sqlite-libs.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ sqlite-libs.i686 3.36.0-3.fc35 @fedora
+ sqlite-libs.x86_64 3.36.0-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ manifest: libsqlite3 libsqlite3.manifest
+
+ dnf-list: libsqlite3-devel sqlite-devel libsqlite3-devel+sqlite-devel.info
+ dnf-repoquery-requires: sqlite-devel 3.36.0-3.fc35 x86_64 true sqlite-devel.requires
+ dnf-list: sqlite-libs sqlite-libs.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsqlite3-devel sqlite-devel rpm <libsqlite3-devel+sqlite-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all sqlite-devel-3.36.0-3.fc35.x86_64 <sqlite-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite-libs rpm <sqlite-libs.info
+ sudo dnf mark --quiet --assumeno install --cacheonly sqlite-libs-3.36.0-3.fc35.x86_64 sqlite-devel-3.36.0-3.fc35.x86_64
+ EOE
+ libsqlite3 3.36.0 (sqlite-libs 3.36.0-3.fc35) installed
+ EOO
+
+
+ : not-installed
+ :
+ : Note that without a fetch the -devel package cannot be resolved, since
+ : `dnf list libsqlite3-devel sqlite-devel` provides no information about
+ : either of these packages.
+ :
+ ln -s ../libsqlite3.manifest ./;
+ cat <<EOI >=libsqlite3-devel+sqlite-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ cat <<EOI >=sqlite-devel.requires-fetched;
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ sqlite x86_64 0:3.36.0-3.fc35
+ sqlite-libs x86_64 0:3.36.0-3.fc35
+ EOI
+ cat <<EOI >=libsqlite3-devel+sqlite-devel.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite-devel.i686 3.36.0-3.fc35 fedora
+ sqlite-devel.x86_64 3.36.0-3.fc35 @fedora
+ EOI
+ cat <<EOI >=sqlite-libs.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite-libs.i686 3.36.0-3.fc35 @fedora
+ sqlite-libs.x86_64 3.36.0-3.fc35 @fedora
+ EOI
+ cat <<EOI >=sqlite-libs.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ sqlite-libs.x86_64 3.36.0-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite-libs.i686 3.36.0-3.fc35 @fedora
+ EOI
+ $* libsqlite3 --install libsqlite3 <<EOI 2>>EOE >>EOO
+ manifest: libsqlite3 libsqlite3.manifest
+
+ dnf-list: libsqlite3-devel sqlite-devel libsqlite3-devel+sqlite-devel.info
+ dnf-repoquery-requires-fetched: sqlite-devel 3.36.0-3.fc35 x86_64 false sqlite-devel.requires-fetched
+ dnf-list-fetched: libsqlite3-devel sqlite-devel libsqlite3-devel+sqlite-devel.info-fetched
+ dnf-list-fetched: sqlite-libs sqlite-libs.info-fetched
+ dnf-list-installed: sqlite-libs sqlite-libs.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsqlite3-devel sqlite-devel rpm <libsqlite3-devel+sqlite-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libsqlite3-devel sqlite-devel rpm <libsqlite3-devel+sqlite-devel.info-fetched
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" sqlite-devel-3.36.0-3.fc35.x86_64 <sqlite-devel.requires-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite-libs rpm <sqlite-libs.info-fetched
+ sudo dnf install --quiet --assumeno sqlite-libs.x86_64 sqlite-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly sqlite-libs.x86_64 sqlite-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite-libs rpm <sqlite-libs.info-installed
+ EOE
+ libsqlite3 3.36.0 (sqlite-libs 3.36.0-3.fc35) not installed
+ EOO
+ }
+
+ : sqlite3
+ :
+ {
+ +cat <<EOI >=sqlite3.manifest
+ : 1
+ name: sqlite3
+ version: 3.39.4+1
+ project: sqlite
+ summary: SQLite database engine shell program
+ license: blessing ; SQLite Blessing.
+ EOI
+
+
+ : main-resolve-fail
+ :
+ cat <<EOI >=sqlite3.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* sqlite3 --install sqlite3 <<EOI 2>>EOE != 0
+ dnf-list: sqlite3 sqlite3.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite3 rpm <sqlite3.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite3 rpm <sqlite3.info
+ error: no installed or available system package for sqlite3
+ EOE
+
+
+ : installed
+ :
+ : In particular test the project name-based resolution of the main
+ : package.
+ :
+ ln -s ../sqlite3.manifest ./;
+ cat <<EOI >=sqlite3+sqlite.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ sqlite.x86_64 3.36.0-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite.i686 3.36.0-3.fc35 fedora
+ EOI
+ $* sqlite3 --install sqlite3 <<EOI 2>>EOE >>EOO
+ manifest: sqlite3 sqlite3.manifest
+
+ dnf-list: sqlite3 sqlite sqlite3+sqlite.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite3 sqlite rpm <sqlite3+sqlite.info
+ sudo dnf mark --quiet --assumeno install --cacheonly sqlite-3.36.0-3.fc35.x86_64
+ EOE
+ sqlite3 3.36.0 (sqlite 3.36.0-3.fc35) installed
+ EOO
+
+
+ : not-installed
+ :
+ ln -s ../sqlite3.manifest ./;
+ cat <<EOI >=sqlite3+sqlite.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite.i686 3.35.0-1.fc35 fedora
+ sqlite.x86_64 3.35.0-1.fc35 @fedora
+ EOI
+ cat <<EOI >=sqlite3+sqlite.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite.i686 3.36.0-3.fc35 fedora
+ sqlite.x86_64 3.36.0-3.fc35 @fedora
+ EOI
+ cat <<EOI >=sqlite.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ sqlite.x86_64 3.36.0-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ sqlite.i686 3.36.0-3.fc35 fedora
+ EOI
+ $* sqlite3 --install sqlite3 <<EOI 2>>EOE >>EOO
+ manifest: sqlite3 sqlite3.manifest
+
+ dnf-list: sqlite3 sqlite sqlite3+sqlite.info
+ dnf-list-fetched: sqlite3 sqlite sqlite3+sqlite.info-fetched
+ dnf-list-installed: sqlite sqlite.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite3 sqlite rpm <sqlite3+sqlite.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite3 sqlite rpm <sqlite3+sqlite.info-fetched
+ sudo dnf install --quiet --assumeno sqlite.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly sqlite.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet sqlite rpm <sqlite.info-installed
+ EOE
+ sqlite3 3.36.0 (sqlite 3.36.0-3.fc35) not installed
+ EOO
+ }
+
+ : libncurses
+ :
+ {
+ +cat <<EOI >=libncurses.manifest
+ : 1
+ name: libncurses
+ version: 6.4
+ upstream-version: 6.4.0
+ project: ncurses
+ fedora-to-downstream-version: /([0-9]+)\.([0-9]+)/\1.\2.0/
+ summary: ncurses C library
+ license: MIT
+ EOI
+ +cat <<EOI >=libncurses-c++.manifest
+ : 1
+ name: libncurses-c++
+ version: 6.4
+ upstream-version: 6.4.0
+ project: ncurses
+ fedora-name: ncurses-c++-libs ncurses-devel
+ fedora-to-downstream-version: /([0-9]+)\.([0-9]+)/\1.\2.0/
+ summary: ncurses C++ library
+ license: MIT
+ EOI
+
+
+ : installed
+ :
+ ln -s ../libncurses.manifest ./;
+ ln -s ../libncurses-c++.manifest ./;
+ cat <<EOI >=libncurses-devel+ncurses-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ EOI
+ cat <<EOI >=ncurses-devel.requires;
+ bash i686 0:5.1.8-3.fc35
+ bash x86_64 0:5.1.8-3.fc35
+ ncurses-c++-libs x86_64 0:6.2-8.20210508.fc35
+ ncurses-devel i686 0:6.2-8.20210508.fc35
+ ncurses-devel x86_64 0:6.2-8.20210508.fc35
+ ncurses-libs x86_64 0:6.2-8.20210508.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=ncurses-libs.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-libs.i686 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ cat <<EOI >=ncurses-c++-libs+ncurses-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-c++-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-c++-libs.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ EOI
+ $* libncurses libncurses-c++ --install libncurses libncurses-c++ <<EOI 2>>EOE >>EOO
+ manifest: libncurses libncurses.manifest
+ manifest: libncurses-c++ libncurses-c++.manifest
+
+ dnf-list: libncurses-devel ncurses-devel libncurses-devel+ncurses-devel.info
+ dnf-repoquery-requires: ncurses-devel 6.2-8.20210508.fc35 x86_64 true ncurses-devel.requires
+ dnf-list: ncurses-libs ncurses-libs.info
+ dnf-list: ncurses-c++-libs ncurses-devel ncurses-c++-libs+ncurses-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libncurses-devel ncurses-devel rpm <libncurses-devel+ncurses-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" --installed --disableexcludes=all ncurses-devel-6.2-8.20210508.fc35.x86_64 <ncurses-devel.requires
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-libs rpm <ncurses-libs.info
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-c++-libs ncurses-devel rpm <ncurses-c++-libs+ncurses-devel.info
+ sudo dnf mark --quiet --assumeno install --cacheonly ncurses-libs-6.2-8.20210508.fc35.x86_64 ncurses-devel-6.2-8.20210508.fc35.x86_64 ncurses-c++-libs-6.2-8.20210508.fc35.x86_64
+ EOE
+ libncurses 6.2.0 (ncurses-libs 6.2-8.20210508.fc35) installed
+ libncurses-c++ 6.2.0 (ncurses-c++-libs 6.2-8.20210508.fc35) installed
+ EOO
+
+
+ : part-installed
+ :
+ ln -s ../libncurses.manifest ./;
+ ln -s ../libncurses-c++.manifest ./;
+ cat <<EOI >=libncurses-devel+ncurses-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 @fedora
+ EOI
+ cat <<EOI >=ncurses-devel.requires-fetched;
+ bash i686 0:5.1.8-3.fc35
+ bash x86_64 0:5.1.8-3.fc35
+ ncurses-c++-libs x86_64 0:6.2-8.20210508.fc35
+ ncurses-devel i686 0:6.2-8.20210508.fc35
+ ncurses-devel x86_64 0:6.2-8.20210508.fc35
+ ncurses-libs x86_64 0:6.2-8.20210508.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=ncurses-libs.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-libs.i686 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ cat <<EOI >=ncurses-c++-libs+ncurses-devel.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-c++-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-c++-libs.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 fedora
+ EOI
+ cat <<EOI >=ncurses-libs+ncurses-c++-libs.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-c++-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.i686 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-c++-libs.i686 6.2-8.20210508.fc35 fedora
+ EOI
+ $* libncurses libncurses-c++ --install libncurses libncurses-c++ <<EOI 2>>EOE >>EOO
+ manifest: libncurses libncurses.manifest
+ manifest: libncurses-c++ libncurses-c++.manifest
+
+ dnf-list: libncurses-devel ncurses-devel libncurses-devel+ncurses-devel.info
+ dnf-repoquery-requires-fetched: ncurses-devel 6.2-8.20210508.fc35 x86_64 false ncurses-devel.requires-fetched
+ dnf-list-fetched: ncurses-libs ncurses-libs.info-fetched
+ dnf-list-fetched: ncurses-c++-libs ncurses-devel ncurses-c++-libs+ncurses-devel.info-fetched
+ dnf-list-installed: ncurses-libs ncurses-c++-libs ncurses-libs+ncurses-c++-libs.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libncurses-devel ncurses-devel rpm <libncurses-devel+ncurses-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libncurses-devel ncurses-devel rpm <libncurses-devel+ncurses-devel.info
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" ncurses-devel-6.2-8.20210508.fc35.x86_64 <ncurses-devel.requires-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-libs rpm <ncurses-libs.info-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-c++-libs ncurses-devel rpm <ncurses-c++-libs+ncurses-devel.info-fetched
+ sudo dnf install --quiet --assumeno ncurses-libs.x86_64 ncurses-devel.x86_64 ncurses-c++-libs.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly ncurses-libs.x86_64 ncurses-devel.x86_64 ncurses-c++-libs.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-libs ncurses-c++-libs rpm <ncurses-libs+ncurses-c++-libs.info-installed
+ EOE
+ libncurses 6.2.0 (ncurses-libs 6.2-8.20210508.fc35) part installed
+ libncurses-c++ 6.2.0 (ncurses-c++-libs 6.2-8.20210508.fc35) part installed
+ EOO
+
+
+ : not-installed
+ :
+ ln -s ../libncurses.manifest ./;
+ ln -s ../libncurses-c++.manifest ./;
+ cat <<EOI >=libncurses-devel+ncurses-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 @fedora
+ EOI
+ cat <<EOI >=libncurses-devel+ncurses-devel.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 fedora
+ EOI
+ cat <<EOI >=ncurses-devel.requires-fetched;
+ bash i686 0:5.1.8-3.fc35
+ bash x86_64 0:5.1.8-3.fc35
+ ncurses-c++-libs x86_64 0:6.2-8.20210508.fc35
+ ncurses-devel i686 0:6.2-8.20210508.fc35
+ ncurses-devel x86_64 0:6.2-8.20210508.fc35
+ ncurses-libs x86_64 0:6.2-8.20210508.fc35
+ pkgconf-pkg-config i686 0:1.8.0-1.fc35
+ pkgconf-pkg-config x86_64 0:1.8.0-1.fc35
+ EOI
+ cat <<EOI >=ncurses-libs.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-libs.i686 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ EOI
+ cat <<EOI >=ncurses-c++-libs+ncurses-devel.info-fetched;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-c++-libs.i686 6.2-8.20210508.fc35 fedora
+ ncurses-c++-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ ncurses-devel.i686 6.2-8.20210508.fc35 fedora
+ ncurses-devel.x86_64 6.2-8.20210508.fc35 fedora
+ EOI
+ cat <<EOI >=ncurses-libs+ncurses-c++-libs.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ ncurses-c++-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.i686 6.2-8.20210508.fc35 @fedora
+ ncurses-libs.x86_64 6.2-8.20210508.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ ncurses-c++-libs.i686 6.2-8.20210508.fc35 fedora
+ EOI
+ $* libncurses libncurses-c++ --install libncurses libncurses-c++ <<EOI 2>>EOE >>EOO
+ manifest: libncurses libncurses.manifest
+ manifest: libncurses-c++ libncurses-c++.manifest
+
+ dnf-list: libncurses-devel ncurses-devel libncurses-devel+ncurses-devel.info
+ dnf-list-fetched: libncurses-devel ncurses-devel libncurses-devel+ncurses-devel.info-fetched
+ dnf-repoquery-requires-fetched: ncurses-devel 6.2-8.20210508.fc35 x86_64 false ncurses-devel.requires-fetched
+ dnf-list-fetched: ncurses-libs ncurses-libs.info-fetched
+ dnf-list-fetched: ncurses-c++-libs ncurses-devel ncurses-c++-libs+ncurses-devel.info-fetched
+ dnf-list-installed: ncurses-libs ncurses-c++-libs ncurses-libs+ncurses-c++-libs.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libncurses-devel ncurses-devel rpm <libncurses-devel+ncurses-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libncurses-devel ncurses-devel rpm <libncurses-devel+ncurses-devel.info-fetched
+ LC_ALL=C dnf repoquery --requires --quiet --cacheonly --resolve --qf "%{name} %{arch} %{epoch}:%{version}-%{release}" ncurses-devel-6.2-8.20210508.fc35.x86_64 <ncurses-devel.requires-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-libs rpm <ncurses-libs.info-fetched
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-c++-libs ncurses-devel rpm <ncurses-c++-libs+ncurses-devel.info-fetched
+ sudo dnf install --quiet --assumeno ncurses-libs.x86_64 ncurses-devel.x86_64 ncurses-c++-libs.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly ncurses-libs.x86_64 ncurses-devel.x86_64 ncurses-c++-libs.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet ncurses-libs ncurses-c++-libs rpm <ncurses-libs+ncurses-c++-libs.info-installed
+ EOE
+ libncurses 6.2.0 (ncurses-libs 6.2-8.20210508.fc35) not installed
+ libncurses-c++ 6.2.0 (ncurses-c++-libs 6.2-8.20210508.fc35) not installed
+ EOO
+ }
+
+ : libsigc++
+ :
+ {
+ +cat <<EOI >=libsigc++.manifest
+ : 1
+ name: libsigc++
+ version: 3.4.0
+ fedora-name: libsigc++30 libsigc++30-devel libsigc++30-doc
+ fedora-name: libsigc++20 libsigc++20-devel libsigc++20-doc
+ summary: Typesafe callback system for standard C++
+ license: LGPL-3.0-only
+ EOI
+
+
+ : one-full-installed
+ :
+ ln -s ../libsigc++.manifest ./;
+ cat <<EOI >=libsigc++30+libsigc++30-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libsigc++30.x86_64 3.0.7-2.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libsigc++30.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.x86_64 3.0.7-2.fc35 fedora
+ EOI
+ cat <<EOI >=libsigc++20+libsigc++20-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libsigc++20.x86_64 2.10.7-3.fc35 @fedora
+ libsigc++20-devel.x86_64 2.10.7-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libsigc++20.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.i686 2.10.7-3.fc35 fedora
+ EOI
+ $* libsigc++ --install libsigc++ <<EOI 2>>EOE >>EOO
+ manifest: libsigc++ libsigc++.manifest
+
+ dnf-list: libsigc++30 libsigc++30-devel libsigc++30+libsigc++30-devel.info
+ dnf-list: libsigc++20 libsigc++20-devel libsigc++20+libsigc++20-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ sudo dnf mark --quiet --assumeno install --cacheonly libsigc++20-2.10.7-3.fc35.x86_64 libsigc++20-devel-2.10.7-3.fc35.x86_64
+ EOE
+ libsigc++ 2.10.7 (libsigc++20 2.10.7-3.fc35) installed
+ EOO
+
+
+ : one-part-installed
+ :
+ ln -s ../libsigc++.manifest ./;
+ cat <<EOI >=libsigc++30+libsigc++30-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libsigc++30.i686 3.0.7-2.fc35 fedora
+ libsigc++30.x86_64 3.0.7-2.fc35 fedora
+ libsigc++30-devel.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.x86_64 3.0.7-2.fc35 fedora
+ EOI
+ cat <<EOI >=libsigc++20+libsigc++20-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libsigc++20.x86_64 2.10.7-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libsigc++20.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.x86_64 2.10.7-3.fc35 fedora
+ EOI
+ cat <<EOI >=libsigc++20.info-installed;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ libsigc++20.x86_64 2.10.7-3.fc35 @fedora
+ Available Packages
+ rpm.x86_64 4.17.1-3.fc35 updates
+ libsigc++20.i686 2.10.7-3.fc35 fedora
+ EOI
+ $* libsigc++ --install libsigc++ <<EOI 2>>EOE >>EOO
+ manifest: libsigc++ libsigc++.manifest
+
+ dnf-list: libsigc++30 libsigc++30-devel libsigc++30+libsigc++30-devel.info
+ dnf-list: libsigc++20 libsigc++20-devel libsigc++20+libsigc++20-devel.info
+ dnf-list-installed: libsigc++20 libsigc++20.info-installed
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ sudo dnf install --quiet --assumeno libsigc++20.x86_64 libsigc++20-devel.x86_64
+ sudo dnf mark --quiet --assumeno install --cacheonly libsigc++20.x86_64 libsigc++20-devel.x86_64
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 rpm <libsigc++20.info-installed
+ EOE
+ libsigc++ 2.10.7 (libsigc++20 2.10.7-3.fc35) part installed
+ EOO
+
+
+ : none-installed
+ :
+ ln -s ../libsigc++.manifest ./;
+ cat <<EOI >=libsigc++30+libsigc++30-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ libsigc++30.i686 3.0.7-2.fc35 fedora
+ libsigc++30.x86_64 3.0.7-2.fc35 fedora
+ libsigc++30-devel.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.x86_64 3.0.7-2.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ cat <<EOI >=libsigc++20+libsigc++20-devel.info;
+ Installed Packages
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ libsigc++20.i686 2.10.7-3.fc35 fedora
+ libsigc++20.x86_64 2.10.7-3.fc35 @fedora
+ libsigc++20-devel.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.x86_64 2.10.7-3.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libsigc++ --install libsigc++ <<EOI 2>>EOE != 0
+ manifest: libsigc++ libsigc++.manifest
+
+ dnf-list: libsigc++30 libsigc++30-devel libsigc++30+libsigc++30-devel.info
+ dnf-list: libsigc++20 libsigc++20-devel libsigc++20+libsigc++20-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ error: multiple available fedora packages for libsigc++
+ info: candidate: libsigc++30 3.0.7-2.fc35
+ info: candidate: libsigc++20 2.10.7-3.fc35
+ info: consider installing the desired package manually and retrying the bpkg command
+ EOE
+
+
+ : both-part-installed
+ :
+ ln -s ../libsigc++.manifest ./;
+ cat <<EOI >=libsigc++30+libsigc++30-devel.info;
+ Installed Packages
+ libsigc++30.x86_64 3.0.7-2.fc35 fedora
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ libsigc++30.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.i686 3.0.7-2.fc35 fedora
+ libsigc++30-devel.x86_64 3.0.7-2.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ cat <<EOI >=libsigc++20+libsigc++20-devel.info;
+ Installed Packages
+ libsigc++20.x86_64 2.10.7-3.fc35 @fedora
+ rpm.x86_64 4.17.1-2.fc35 @updates
+ Available Packages
+ libsigc++20.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.i686 2.10.7-3.fc35 fedora
+ libsigc++20-devel.x86_64 2.10.7-3.fc35 fedora
+ rpm.x86_64 4.17.1-3.fc35 updates
+ EOI
+ $* libsigc++ --install libsigc++ <<EOI 2>>EOE != 0
+ manifest: libsigc++ libsigc++.manifest
+
+ dnf-list: libsigc++30 libsigc++30-devel libsigc++30+libsigc++30-devel.info
+ dnf-list: libsigc++20 libsigc++20-devel libsigc++20+libsigc++20-devel.info
+ EOI
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ sudo dnf makecache --quiet --assumeno --refresh
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++30 libsigc++30-devel rpm <libsigc++30+libsigc++30-devel.info
+ LC_ALL=C dnf list --all --cacheonly --quiet libsigc++20 libsigc++20-devel rpm <libsigc++20+libsigc++20-devel.info
+ error: multiple partially installed fedora packages for libsigc++
+ info: candidate: libsigc++30 3.0.7-2.fc35, missing components: libsigc++30-devel
+ info: candidate: libsigc++20 2.10.7-3.fc35, missing components: libsigc++20-devel
+ info: consider fully installing the desired package manually and retrying the bpkg command
+ EOE
+ }
+}
diff --git a/bpkg/system-package-manager.cxx b/bpkg/system-package-manager.cxx
new file mode 100644
index 0000000..373e8ff
--- /dev/null
+++ b/bpkg/system-package-manager.cxx
@@ -0,0 +1,904 @@
+// file : bpkg/system-package-manager.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager.hxx>
+
+#include <sstream>
+
+#include <libbutl/regex.hxx>
+#include <libbutl/semantic-version.hxx>
+#include <libbutl/json/parser.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/package-odb.hxx>
+#include <bpkg/database.hxx>
+#include <bpkg/diagnostics.hxx>
+
+#include <bpkg/pkg-bindist-options.hxx>
+
+#include <bpkg/system-package-manager-debian.hxx>
+#include <bpkg/system-package-manager-fedora.hxx>
+#include <bpkg/system-package-manager-archive.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace bpkg
+{
+ system_package_manager::
+ ~system_package_manager ()
+ {
+ // vtable
+ }
+
+ static optional<os_release>
+ host_release (const target_triplet& host)
+ try
+ {
+ return butl::host_os_release (host);
+ }
+ catch (const runtime_error& e)
+ {
+ fail << "unable to determine host operating system release: " << e << endf;
+ }
+
+  // Return true if the specified operating system is or is like the
+  // specified id.
+ //
+ static inline bool
+ is_or_like (const os_release& os, const char* id)
+ {
+ return (os.name_id == id ||
+ find_if (os.like_ids.begin (), os.like_ids.end (),
+ [id] (const string& n)
+ {
+ return n == id;
+ }) != os.like_ids.end ());
+ }
+
+ unique_ptr<system_package_manager>
+ make_consumption_system_package_manager (const common_options& co,
+ const target_triplet& host,
+ const string& name,
+ const string& arch,
+ bool install,
+ bool fetch,
+ bool yes,
+ const string& sudo)
+ {
+    // Note: similar to make_production_system_package_manager() below.
+
+ optional<bool> progress (co.progress () ? true :
+ co.no_progress () ? false :
+ optional<bool> ());
+
+ optional<size_t> fetch_timeout (co.fetch_timeout_specified ()
+ ? co.fetch_timeout ()
+ : optional<size_t> ());
+
+ unique_ptr<system_package_manager> r;
+
+ if (optional<os_release> oos = host_release (host))
+ {
+ os_release& os (*oos);
+
+ if (host.class_ == "linux")
+ {
+ if (is_or_like (os, "debian") ||
+ is_or_like (os, "ubuntu"))
+ {
+ if (!name.empty () && name != "debian")
+ fail << "unsupported package manager '" << name << "' for "
+ << os.name_id << " host";
+
+ // If we recognized this as Debian-like in an ad hoc manner, then
+ // add debian to like_ids.
+ //
+ if (os.name_id != "debian" && !is_or_like (os, "debian"))
+ os.like_ids.push_back ("debian");
+
+ r.reset (new system_package_manager_debian (
+ move (os), host, arch,
+ progress, fetch_timeout, install, fetch, yes, sudo));
+ }
+ else if (is_or_like (os, "fedora") ||
+ is_or_like (os, "rhel") ||
+ is_or_like (os, "centos") ||
+ is_or_like (os, "rocky") ||
+ is_or_like (os, "almalinux"))
+ {
+ if (!name.empty () && name != "fedora")
+ fail << "unsupported package manager '" << name << "' for "
+ << os.name_id << " host";
+
+ // If we recognized this as Fedora-like in an ad hoc manner, then
+ // add fedora to like_ids.
+ //
+ if (os.name_id != "fedora" && !is_or_like (os, "fedora"))
+ os.like_ids.push_back ("fedora");
+
+ r.reset (new system_package_manager_fedora (
+ move (os), host, arch,
+ progress, fetch_timeout, install, fetch, yes, sudo));
+ }
+ // NOTE: remember to update the --sys-distribution pkg-build option
+ // documentation if adding support for another package manager.
+ }
+ }
+
+ if (r == nullptr)
+ {
+ if (!name.empty ())
+ fail << "unsupported package manager '" << name << "' for host "
+ << host;
+ }
+
+ return r;
+ }
+
+ pair<unique_ptr<system_package_manager>, string>
+ make_production_system_package_manager (const pkg_bindist_options& o,
+ const target_triplet& host,
+ const string& name,
+ const string& arch)
+ {
+    // Note: similar to make_consumption_system_package_manager() above.
+
+ optional<bool> progress (o.progress () ? true :
+ o.no_progress () ? false :
+ optional<bool> ());
+
+ optional<os_release> oos;
+ if (o.os_release_id_specified ())
+ {
+ oos = os_release ();
+ oos->name_id = o.os_release_id ();
+ }
+ else
+ oos = host_release (host);
+
+ if (o.os_release_name_specified ())
+ oos->name = o.os_release_name ();
+
+ if (o.os_release_version_id_specified ())
+ oos->version_id = o.os_release_version_id ();
+
+ pair<unique_ptr<system_package_manager>, string> r;
+ if (oos)
+ {
+ os_release& os (*oos);
+
+ // Note that we don't make archive the default on any platform in case
+ // we later want to support its native package format.
+ //
+ if (name == "archive")
+ {
+ r.first.reset (new system_package_manager_archive (
+ move (os), host, arch, progress, &o));
+ r.second = "archive";
+ }
+ else if (host.class_ == "linux")
+ {
+ if (is_or_like (os, "debian") ||
+ is_or_like (os, "ubuntu"))
+ {
+ if (!name.empty () && name != "debian")
+ fail << "unsupported package manager '" << name << "' for "
+ << os.name_id << " host";
+
+ if (os.name_id != "debian" && !is_or_like (os, "debian"))
+ os.like_ids.push_back ("debian");
+
+ r.first.reset (new system_package_manager_debian (
+ move (os), host, arch, progress, &o));
+ r.second = "debian";
+ }
+ else if (is_or_like (os, "fedora") ||
+ is_or_like (os, "rhel") ||
+ is_or_like (os, "centos") ||
+ is_or_like (os, "rocky") ||
+ is_or_like (os, "almalinux"))
+ {
+ if (!name.empty () && name != "fedora")
+ fail << "unsupported package manager '" << name << "' for "
+ << os.name_id << " host";
+
+ if (os.name_id != "fedora" && !is_or_like (os, "fedora"))
+ os.like_ids.push_back ("fedora");
+
+ r.first.reset (new system_package_manager_fedora (
+ move (os), host, arch, progress, &o));
+ r.second = "fedora";
+ }
+ // NOTE: remember to update the --distribution pkg-bindist option
+ // documentation if adding support for another package manager.
+ }
+ }
+
+ if (r.first == nullptr)
+ {
+ if (!name.empty ())
+ fail << "unsupported package manager '" << name << "' for host "
+ << host;
+ }
+
+ return r;
+ }
+
+ // Return the version id parsed as a semantic version if it is not empty and
+ // the "0" semantic version otherwise. Issue diagnostics and fail on parsing
+ // errors.
+ //
+ // Note: the name_id argument is only used for diagnostics.
+ //
+ static inline semantic_version
+ parse_version_id (const string& version_id, const string& name_id)
+ {
+ if (version_id.empty ())
+ return semantic_version (0, 0, 0);
+
+ try
+ {
+ return semantic_version (version_id, semantic_version::allow_omit_minor);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid version '" << version_id << "' for " << name_id
+ << " host: " << e << endf;
+ }
+ }
+
+ // Parse the <distribution> component of the specified <distribution>-*
+ // value into the distribution name and version (return as "0" if not
+ // present). Leave in the d argument the string representation of the
+ // version (used to detect the special non-native <name>_0). Issue
+ // diagnostics and fail on parsing errors.
+ //
+ // Note: the value_name, ap, and af arguments are only used for diagnostics.
+ //
+ static pair<string, semantic_version>
+ parse_distribution (string& d, // <name>[_<version>]
+ const string& value_name,
+ const shared_ptr<available_package>& ap,
+ const lazy_shared_ptr<repository_fragment>& af)
+ {
+ size_t p (d.rfind ('_')); // Version-separating underscore.
+
+ // If the '_' separator is present, then make sure that the right-hand
+ // part looks like a version (not empty and only contains digits and
+ // dots).
+ //
+ if (p != string::npos)
+ {
+ if (p != d.size () - 1)
+ {
+ for (size_t i (p + 1); i != d.size (); ++i)
+ {
+ if (!digit (d[i]) && d[i] != '.')
+ {
+ p = string::npos;
+ break;
+ }
+ }
+ }
+ else
+ p = string::npos;
+ }
+
+ // Parse the distribution version if present and leave it "0" otherwise.
+ //
+ string dn;
+ semantic_version dv (0, 0, 0);
+ if (p != string::npos)
+ {
+ dn.assign (d, 0, p);
+ d.erase (0, p + 1);
+
+ try
+ {
+ dv = semantic_version (d, semantic_version::allow_omit_minor);
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: the repository fragment may have no database associated when
+ // used in tests.
+ //
+ shared_ptr<repository_fragment> f (af.get_eager ());
+ database* db (!(f != nullptr && !af.loaded ()) // Not transient?
+ ? &af.database ()
+ : nullptr);
+
+ diag_record dr (fail);
+ dr << "invalid distribution version '" << d << "' in value "
+ << value_name << " for package " << ap->id.name << ' '
+ << ap->version;
+
+ if (db != nullptr)
+ dr << *db;
+
+ dr << " in repository " << (f != nullptr ? f : af.load ())->location
+ << ": " << e;
+ }
+ }
+ else
+ {
+ dn = move (d);
+ d.clear ();
+ }
+
+ return make_pair (move (dn), move (dv));
+ }
+
+ strings system_package_manager::
+ system_package_names (const available_packages& aps,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids,
+ bool native)
+ {
+ assert (!aps.empty ());
+
+ semantic_version vid (parse_version_id (version_id, name_id));
+
+ // Return those <name>[_<version>]-name distribution values of the
+ // specified available packages whose <name> component matches the
+ // specified distribution name and the <version> component (assumed as "0"
+ // if not present) is less or equal the specified distribution version.
+ // Suppress duplicate values.
+ //
+ auto name_values = [&aps, native] (const string& n,
+ const semantic_version& v)
+ {
+ strings r;
+
+ // For each available package sort the system package names in the
+ // distribution version descending order and then append them to the
+ // resulting list, keeping this order and suppressing duplicates.
+ //
+ using name_version = pair<string, semantic_version>;
+ vector<name_version> nvs; // Reuse the buffer.
+
+ for (const auto& a: aps)
+ {
+ nvs.clear ();
+
+ const shared_ptr<available_package>& ap (a.first);
+
+ for (const distribution_name_value& dv: ap->distribution_values)
+ {
+ if (optional<string> d = dv.distribution ("-name"))
+ {
+ pair<string, semantic_version> dnv (
+ parse_distribution (*d, dv.name, ap, a.second));
+
+ // Skip <name>_0 if we are only interested in the native mappings.
+ // If we are interested in the non-native mapping, then we treat
+ // <name>_0 as the matching version.
+ //
+ bool nn (*d == "0");
+ if (nn && native)
+ continue;
+
+ semantic_version& dvr (dnv.second);
+
+ if (dnv.first == n && (nn || dvr <= v))
+ {
+ // Add the name/version pair to the sorted vector.
+ //
+ // If this is the non-native mapping, then return just that.
+ //
+ if (nn)
+ {
+ r.clear (); // Drop anything we have accumulated so far.
+ r.push_back (move (dv.value));
+ return r;
+ }
+
+ name_version nv (make_pair (dv.value, move (dvr)));
+
+ nvs.insert (upper_bound (nvs.begin (), nvs.end (), nv,
+ [] (const name_version& x,
+ const name_version& y)
+ {return x.second > y.second;}),
+ move (nv));
+ }
+ }
+ }
+
+ // Append the sorted names to the resulting list.
+ //
+ for (name_version& nv: nvs)
+ {
+ if (find_if (r.begin (), r.end (),
+ [&nv] (const string& n) {return nv.first == n;}) ==
+ r.end ())
+ {
+ r.push_back (move (nv.first));
+ }
+ }
+ }
+
+ return r;
+ };
+
+ // Collect distribution values for those <distribution>-name names which
+ // match the name id and refer to the version which is less or equal than
+ // the version id.
+ //
+ strings r (name_values (name_id, vid));
+
+ // If the resulting list is empty and the like ids are specified, then
+ // re-collect but now using the like id and "0" version id instead.
+ //
+ if (r.empty ())
+ {
+ for (const string& like_id: like_ids)
+ {
+ r = name_values (like_id, semantic_version (0, 0, 0));
+ if (!r.empty ())
+ break;
+ }
+ }
+
+ return r;
+ }
+
+ optional<string> system_package_manager::
+ system_package_version (const shared_ptr<available_package>& ap,
+ const lazy_shared_ptr<repository_fragment>& af,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids)
+ {
+ semantic_version vid (parse_version_id (version_id, name_id));
+
+ // Iterate over the <name>[_<version>]-version distribution values of the
+ // passed available package. Only consider those values whose <name>
+ // component matches the specified distribution name and the <version>
+ // component (assumed as "0" if not present) is less or equal the
+ // specified distribution version. Return the system package version if
+ // the distribution version is equal to the specified one. Otherwise (the
+ // version is less), continue iterating while preferring system version
+ // candidates for greater distribution versions. Note that here we are
+ // trying to pick the system version with distribution version closest to
+ // (but never greater than) the specified distribution version, similar to
+ // what we do in downstream_package_version() (see its
+ // downstream_version() lambda for details).
+ //
+ auto system_version = [&ap, &af] (const string& n,
+ const semantic_version& v)
+ -> optional<string>
+ {
+ optional<string> r;
+ semantic_version rv;
+
+ for (const distribution_name_value& dv: ap->distribution_values)
+ {
+ if (optional<string> d = dv.distribution ("-version"))
+ {
+ pair<string, semantic_version> dnv (
+ parse_distribution (*d, dv.name, ap, af));
+
+ semantic_version& dvr (dnv.second);
+
+ if (dnv.first == n && dvr <= v)
+ {
+ // If the distribution version is equal to the specified one, then
+ // we are done. Otherwise, save the system version if it is
+ // preferable and continue iterating.
+ //
+ if (dvr == v)
+ return move (dv.value);
+
+ if (!r || rv < dvr)
+ {
+ r = move (dv.value);
+ rv = move (dvr);
+ }
+ }
+ }
+ }
+
+ return r;
+ };
+
+ // Try to deduce the system package version using the
+ // <distribution>-version values that match the name id and refer to the
+ // version which is less or equal than the version id.
+ //
+ optional<string> r (system_version (name_id, vid));
+
+ // If the system package version is not deduced and the like ids are
+ // specified, then re-try but now using the like id and "0" version id
+ // instead.
+ //
+ if (!r)
+ {
+ for (const string& like_id: like_ids)
+ {
+ r = system_version (like_id, semantic_version (0, 0, 0));
+ if (r)
+ break;
+ }
+ }
+
+ return r;
+
+ }
+
+ optional<version> system_package_manager::
+ downstream_package_version (const string& system_version,
+ const available_packages& aps,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids)
+ {
+ assert (!aps.empty ());
+
+ semantic_version vid (parse_version_id (version_id, name_id));
+
+ // Iterate over the passed available packages (in version descending
+ // order) and over the <name>[_<version>]-to-downstream-version
+ // distribution values they contain. Only consider those values whose
+ // <name> component matches the specified distribution name and the
+ // <version> component (assumed as "0" if not present) is less or equal
+ // the specified distribution version. For such values match the regex
+ // pattern against the passed system version and if it matches consider
+ // the replacement as the resulting downstream version candidate. Return
+ // this downstream version if the distribution version is equal to the
+ // specified one. Otherwise (the version is less), continue iterating
+ // while preferring downstream version candidates for greater distribution
+ // versions. Note that here we are trying to use a version mapping for the
+ // distribution version closest to (but never greater than) the specified
+ // distribution version. So, for example, if both following values contain
+ // a matching mapping, then for debian 11 we prefer the downstream version
+ // produced by the debian_10-to-downstream-version value:
+ //
+ // debian_9-to-downstream-version
+ // debian_10-to-downstream-version
+ //
+ auto downstream_version = [&aps, &system_version]
+ (const string& n,
+ const semantic_version& v) -> optional<version>
+ {
+ optional<version> r;
+ semantic_version rv;
+
+ for (const auto& a: aps)
+ {
+ const shared_ptr<available_package>& ap (a.first);
+
+ for (const distribution_name_value& nv: ap->distribution_values)
+ {
+ if (optional<string> d = nv.distribution ("-to-downstream-version"))
+ {
+ pair<string, semantic_version> dnv (
+ parse_distribution (*d, nv.name, ap, a.second));
+
+ semantic_version& dvr (dnv.second);
+
+ if (dnv.first == n && dvr <= v)
+ {
+ auto bad_value = [&nv, &ap, &a] (const string& d)
+ {
+ // Note: the repository fragment may have no database
+ // associated when used in tests.
+ //
+ const lazy_shared_ptr<repository_fragment>& af (a.second);
+ shared_ptr<repository_fragment> f (af.get_eager ());
+ database* db (!(f != nullptr && !af.loaded ()) // Not transient?
+ ? &af.database ()
+ : nullptr);
+
+ diag_record dr (fail);
+ dr << "invalid distribution value '" << nv.name << ": "
+ << nv.value << "' for package " << ap->id.name << ' '
+ << ap->version;
+
+ if (db != nullptr)
+ dr << *db;
+
+ dr << " in repository "
+ << (f != nullptr ? f : af.load ())->location << ": " << d;
+ };
+
+ // Parse the distribution value into the regex pattern and the
+ // replacement.
+ //
+ // Note that in the future we may add support for some regex
+ // flags.
+ //
+ pair<string, string> rep;
+ try
+ {
+ size_t end;
+ const string& val (nv.value);
+ rep = regex_replace_parse (val.c_str (), val.size (), end);
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_value (e.what ());
+ }
+
+ // Match the regex pattern against the system version and skip
+ // the value if it doesn't match or proceed to parsing the
+ // downstream version resulting from the regex replacement
+ // otherwise.
+ //
+ string dv;
+ try
+ {
+ regex re (rep.first, regex::ECMAScript);
+
+ pair<string, bool> rr (
+ regex_replace_match (system_version, re, rep.second));
+
+ // Skip the regex if it doesn't match.
+ //
+ if (!rr.second)
+ continue;
+
+ dv = move (rr.first);
+ }
+ catch (const regex_error& e)
+ {
+ // Print regex_error description if meaningful (no space).
+ //
+ ostringstream os;
+ os << "invalid regex pattern '" << rep.first << "'" << e;
+ bad_value (os.str ());
+ }
+
+ // Parse the downstream version.
+ //
+ try
+ {
+ version ver (dv);
+
+ // If the distribution version is equal to the specified one,
+ // then we are done. Otherwise, save the downstream version if
+ // it is preferable and continue iterating.
+ //
+ // Note that bailing out immediately in the former case is
+ // essential. Otherwise, we can potentially fail later on, for
+ // example, some ill-formed regex which is already fixed in
+ // some newer package.
+ //
+ if (dvr == v)
+ return ver;
+
+ if (!r || rv < dvr)
+ {
+ r = move (ver);
+ rv = move (dvr);
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_value ("resulting downstream version '" + dv +
+ "' is invalid: " + e.what ());
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+ };
+
+ // Try to deduce the downstream version using the
+ // <distribution>-to-downstream-version values that match the name id and
+ // refer to the version which is less or equal than the version id.
+ //
+ optional<version> r (downstream_version (name_id, vid));
+
+ // If the downstream version is not deduced and the like ids are
+ // specified, then re-try but now using the like id and "0" version id
+ // instead.
+ //
+ if (!r)
+ {
+ for (const string& like_id: like_ids)
+ {
+ r = downstream_version (like_id, semantic_version (0, 0, 0));
+ if (r)
+ break;
+ }
+ }
+
+ return r;
+ }
+
+ auto system_package_manager::
+ installed_entries (const common_options& co,
+ const packages& pkgs,
+ const strings& vars,
+ const string& scope) -> installed_entry_map
+ {
+ process_path pp (search_b (co));
+
+ // Note that we don't use start_b() here since we want to be consistent
+ // with how things will be run when building the package.
+ //
+ cstrings args {
+ pp.recall_string (),
+ "--quiet", // Note: implies --no-progress.
+ "--dry-run"};
+
+ // Pass our --jobs value, if any.
+ //
+ string jobs;
+ if (size_t n = co.jobs_specified () ? co.jobs () : 0)
+ {
+ jobs = to_string (n);
+ args.push_back ("--jobs");
+ args.push_back (jobs.c_str ());
+ }
+
+ // Pass any --build-option.
+ //
+ for (const string& o: co.build_option ()) args.push_back (o.c_str ());
+
+ // Configuration variables.
+ //
+ for (const string& v: vars) args.push_back (v.c_str ());
+
+ string scope_arg;
+ args.push_back ((scope_arg = "!config.install.scope=" + scope).c_str ());
+
+ args.push_back ("!config.install.manifest=-");
+
+ // Package directories to install.
+ //
+ strings dirs;
+ for (const package& p: pkgs) dirs.push_back (p.out_root.representation ());
+ args.push_back ("install:");
+ for (const string& d: dirs) args.push_back (d.c_str ());
+
+ args.push_back (nullptr);
+
+ installed_entry_map r;
+ try
+ {
+ if (verb >= 2)
+ print_process (args);
+ else if (verb == 1)
+ text << "determining filesystem entries that would be installed...";
+
+ // Redirect stdout to a pipe.
+ //
+ process pr (pp,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 2 /* stderr */);
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip);
+
+ json::parser p (is,
+ args[0] /* input_name */,
+ true /* multi_value */,
+ "\n" /* value_separators */);
+
+ using event = json::event;
+
+ // Note: recursive lambda.
+ //
+ auto parse_entry = [&r, &p] (const auto& parse_entry) -> void
+ {
+ // enter: after begin_object
+ // leave: after end_object
+
+ string t (p.next_expect_member_string ("type"));
+
+ if (t == "target")
+ {
+ p.next_expect_member_string ("name");
+
+ p.next_expect_member_array ("entries");
+ while (p.next_expect (event::begin_object, event::end_array))
+ parse_entry (parse_entry);
+ }
+ else if (t == "file" || t == "symlink")
+ {
+ path ep (p.next_expect_member_string ("path"));
+ assert (ep.absolute () && ep.normalized (false /* separators */));
+
+ if (t == "file")
+ {
+ string em (p.next_expect_member_string ("mode"));
+
+ auto p (
+ r.emplace (
+ move (ep), installed_entry {move (em), nullptr}));
+
+ if (!p.second)
+ fail << p.first->first << " is installed multiple times";
+ }
+ else
+ {
+ path et (p.next_expect_member_string ("target"));
+ if (et.relative ())
+ {
+ et = ep.directory () / et;
+ et.normalize ();
+ }
+
+ auto i (r.find (et));
+ if (i == r.end ())
+ fail << "symlink " << ep << " target " << et << " does not "
+ << "refer to previously installed entry";
+
+ auto p (r.emplace (move (ep), installed_entry {"", &*i}));
+
+ if (!p.second)
+ fail << p.first->first << " is installed multiple times";
+ }
+ }
+ else
+ {
+ // Fall through to skip all members of an unknown entry type.
+ //
+          // Note that this also covers the directory entries which we
+ // don't care about.
+ }
+
+ // Skip unknown members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ p.next_expect_value_skip ();
+ };
+
+ while (p.peek ()) // More values.
+ {
+ p.next_expect (event::begin_object); // entry
+ parse_entry (parse_entry);
+
+ if (p.next ()) // Consume value-terminating nullopt.
+ fail << "unexpected data after entry object";
+ }
+
+ is.close ();
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ if (pr.wait ())
+ fail (location ("<stdin>", e.line, e.column))
+ << "invalid install manifest json input: " << e;
+
+ // Fall through.
+ }
+ catch (const io_error& e)
+ {
+ if (pr.wait ())
+ fail << "unable to read " << args[0] << " output: " << e;
+
+ // Fall through.
+ }
+
+ if (!pr.wait ())
+ {
+ diag_record dr (fail);
+ dr << args[0] << " exited with non-zero code";
+
+ if (verb < 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+
+ return r;
+ }
+}
diff --git a/bpkg/system-package-manager.hxx b/bpkg/system-package-manager.hxx
new file mode 100644
index 0000000..7f5af7d
--- /dev/null
+++ b/bpkg/system-package-manager.hxx
@@ -0,0 +1,463 @@
+// file : bpkg/system-package-manager.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_SYSTEM_PACKAGE_MANAGER_HXX
+#define BPKG_SYSTEM_PACKAGE_MANAGER_HXX
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <libbutl/path-map.hxx>
+#include <libbutl/host-os-release.hxx>
+
+#include <bpkg/package.hxx>
+#include <bpkg/common-options.hxx>
+
+namespace bpkg
+{
+ using os_release = butl::os_release;
+
+ // The system/distribution package manager interface. Used by both pkg-build
+ // (to query and install system packages) and by pkg-bindist (to generate
+ // them).
+ //
+ // Note that currently the result of a query is a single available version.
+ // While some package managers may support having multiple available
+ // versions and may even allow installing multiple versions in parallel,
+ // supporting this on our side will complicate things quite a bit. While we
+ // can probably plug multiple available versions into our constraint
+ // satisfaction machinery, the rabbit hole goes deeper than that since, for
+ // example, different bpkg packages can be mapped to the same system
+ // package, as is the case for libcrypto/libssl which are both mapped to
+ // libssl on Debian. This means we will need to somehow coordinate (and
+ // likely backtrack) version selection between unrelated bpkg packages
+ // because only one underlying system version can be selected. (One
+ // simplified way to handle this would be to detect that different versions
+ // were selected and fail asking the user to resolve this manually.)
+ //
+ // Additionally, parallel installation is unlikely to be supported for the
+ // packages we are interested in due to the underlying limitations.
+ // Specifically, the packages that we are primarily interested in are
+ // libraries with headers and executables (tools). While most package
+ // managers (e.g., Debian, Fedora) are able to install multiple libraries in
+ // parallel, they normally can only install a single set of headers, static
+ // libraries, pkg-config files, etc., (e.g., -dev/-devel package) at a time
+ // due to them being installed into the same location (e.g., /usr/include).
+ // The same holds for executables, which are installed into the same
+ // location (e.g., /usr/bin).
+ //
+ // It is possible that a certain library has made arrangements for
+ // multiple of its versions to co-exist. For example, hypothetically, our
+ // libssl package could be mapped to both libssl1.1 libssl1.1-dev and
+ // libssl3 libssl3-dev which could be installed at the same time (note
+ // that it is not the case in reality; there is only libssl-dev). However,
+ // in this case, we should probably also have two packages with separate
+ // names (e.g., libssl and libssl3) that can also co-exist. An example of
+ // this would be libQt5Core and libQt6Core. (Note that strictly speaking
+ // there could be different degrees of co-existence: for the system
+ // package manager it is sufficient for different versions not to clobber
+ // each other's files while for us we may also need the ability to use
+ // different versions in the base build).
+ //
+ // Note also that the above reasoning is quite C/C++-centric and it's
+ // possible that multiple versions of libraries (or equivalent) for other
+ // languages (e.g., Rust) can always co-exist. Plus, even in the case of
+ // C/C++ libraries, there is still the plausible case of picking one of
+ // the multiple available versions.
+ //
+ // On the other hand, the ultimate goal of system package managers, at least
+ // traditional ones like Debian and Fedora, is to end up with a single,
+ // usually the latest available, version of the package that is used by
+ // everyone. In fact, if one looks at a stable distributions of Debian and
+ // Fedora, they normally provide only a single version of each package. This
+ // decision will also likely simplify the implementation. For example, on
+ // Debian, it's straightforward to get the installed and candidate versions
+ // (e.g., from apt-cache policy). But getting all the possible versions that
+ // can be installed without having to specify the release explicitly is a
+ // lot less straightforward (see the apt-cache command documentation in The
+ // Debian Administrator's Handbook for background).
+ //
+ // So for now we keep it simple and pick a single available version but can
+ // probably revise this decision later.
+ //
+ struct system_package_status
+ {
+ // Downstream (as in, bpkg package) version.
+ //
+ bpkg::version version;
+
+ // System (as in, distribution) package name and version for diagnostics.
+ //
+ // Note that this status may represent multiple system packages (for
+ // example, libfoo and libfoo-dev) and here we have only the
+ // main/representative package name (for example, libfoo).
+ //
+ string system_name;
+ string system_version;
+
+ // The system package can be either "available already installed",
+ // "available partially installed" (for example, libfoo but not
+ // libfoo-dev is installed) or "available not yet installed".
+ //
+ enum status_type {installed, partially_installed, not_installed};
+
+ status_type status = not_installed;
+ };
+
+ // As mentioned above the system package manager API has two parts:
+ // consumption (status() and install()) and production (generate()) and a
+ // particular implementation may only implement one, the other, or both. If
+ // a particular part is not implemented, then the corresponding make_*()
+ // function below should never return an instance of such a system package
+ // manager.
+ //
+ class system_package_manager
+ {
+ public:
+ // Query the system package status.
+ //
+ // This function has two modes: cache-only (available_packages is NULL)
+ // and full (available_packages is not NULL). In the cache-only mode this
+ // function returns the status of this package if it has already been
+ // queried and nullopt otherwise. This allows the caller to only collect
+ // all the available packages (for the name/version mapping information)
+ // if really necessary.
+ //
+ // The returned status can be NULL, which indicates that no such package
+ // is available from the system package manager. Note that NULL is also
+ // returned if no fully installed package is available from the system and
+ // package installation is not enabled (see the constructor below).
+ //
+ // Note also that the implementation is expected to issue appropriate
+ // progress and diagnostics if fetching package metadata (again see the
+ // constructor below).
+ //
+ virtual optional<const system_package_status*>
+ status (const package_name&, const available_packages*) = 0;
+
+ // Install the specified subset of the previously-queried packages.
+ // Should only be called if installation is enabled (see the constructor
+ // below).
+ //
+ // Note that this function should be called only once after the final set
+ // of the required system packages has been determined. And the specified
+ // subset should contain all the selected packages, including the already
+ // fully installed. This allows the implementation to merge and de-
+ // duplicate the system package set to be installed (since some bpkg
+ // packages may be mapped to the same system package), perform post-
+ // installation verifications (such as making sure the versions of already
+ // installed packages have not changed due to upgrades), change properties
+ // of already installed packages (e.g., mark them as manually installed in
+ // Debian), etc.
+ //
+ // Note also that the implementation is expected to issue appropriate
+ // progress and diagnostics.
+ //
+ virtual void
+ install (const vector<package_name>&) = 0;
+
+ // Generate a binary distribution package. See the pkg-bindist(1) man page
+ // for background and the pkg_bindist() function implementation for
+ // details. The recursive_full argument corresponds to the --recursive
+ // auto (present false) and full (present true) modes.
+ //
+ // The available packages are loaded for all the packages in pkgs and
+ // deps. For non-system packages (so for all in pkgs) there is always a
+ // single available package that corresponds to the selected package. The
+ // out_root is only set for packages in pkgs. Note also that all the
+ // packages in pkgs and deps are guaranteed to belong to the same build
+ // configuration (as opposed to being spread over multiple linked
+ // configurations). Its absolute path is passed in cfg_dir.
+ //
+ // The passed package manifest corresponds to the first package in pkgs
+ // (normally used as a source of additional package metadata such as
+ // summary, emails, urls, etc).
+ //
+ // The passed package type corresponds to the first package in pkgs while
+ // the languages -- to all the packages in pkgs plus, in the recursive
+ // mode, to all the non-system dependencies. In other words, the languages
+ // list contains every language that is used by anything that ends up in
+ // the package.
+ //
+ // Return the list of paths to binary packages and any other associated
+ // files (build metadata, etc) that could be useful for their consumption.
+ // Each returned file has a distribution-specific type that classifies it.
+ // If the result is empty, assume the prepare-only mode (or similar) with
+ // appropriate result diagnostics having been already issued.
+ //
+ // Note that this function may be called multiple times in the
+ // --recursive=separate mode. In this case the first argument indicates
+ // whether this is the first call (can be used, for example, to adjust the
+ // --wipe-output semantics).
+ //
+ struct package
+ {
+ shared_ptr<selected_package> selected;
+ available_packages available;
+ dir_path out_root; // Absolute and normalized.
+ };
+
+ using packages = vector<package>;
+
+ struct binary_file
+ {
+ string type;
+ bpkg::path path;
+ string system_name; // Empty if not applicable.
+ };
+
+ struct binary_files: public vector<binary_file>
+ {
+ string system_version; // Empty if not applicable.
+ };
+
+ virtual binary_files
+ generate (const packages& pkgs,
+ const packages& deps,
+ const strings& vars,
+ const dir_path& cfg_dir,
+ const package_manifest&,
+ const string& type,
+ const small_vector<language, 1>&,
+ optional<bool> recursive_full,
+ bool first) = 0;
+
+ public:
+ bpkg::os_release os_release;
+ target_triplet host;
+ string arch; // Architecture in system package manager spelling.
+
+ // Consumption constructor.
+ //
+ // If install is true, then enable package installation.
+ //
+ // If fetch is false, then do not re-fetch the system package repository
+ // metadata (that is, available packages/versions) before querying for the
+ // available version of the not yet installed or partially installed
+ // packages.
+ //
+ // If fetch timeout (in seconds) is specified, then use it for all the
+ // underlying network operations.
+ //
+ system_package_manager (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress,
+ optional<size_t> fetch_timeout,
+ bool install,
+ bool fetch,
+ bool yes,
+ string sudo)
+ : os_release (move (osr)),
+ host (h),
+ arch (move (a)),
+ progress_ (progress),
+ fetch_timeout_ (fetch_timeout),
+ install_ (install),
+ fetch_ (fetch),
+ yes_ (yes),
+ sudo_ (sudo != "false" ? move (sudo) : string ()) {}
+
+ // Production constructor.
+ //
+ system_package_manager (bpkg::os_release&& osr,
+ const target_triplet& h,
+ string a,
+ optional<bool> progress)
+ : os_release (move (osr)),
+ host (h),
+ arch (move (a)),
+ progress_ (progress),
+ install_ (false),
+ fetch_ (false),
+ yes_ (false) {}
+
+ virtual
+ ~system_package_manager ();
+
+ // Implementation details.
+ //
+ public:
+ // Given the available packages (as returned by find_available_all())
+ // return the list of system package names as mapped by the
+ // <distribution>-name values.
+ //
+ // The name_id, version_id, and like_ids are the values from os_release
+ // (refer there for background). If version_id is empty, then it's treated
+ // as "0".
+ //
+ // First consider <distribution>-name values corresponding to name_id.
+ // Assume <distribution> has the <name>[_<version>] form, where <version>
+ // is a semver-like version (e.g., 10, 10.15, or 10.15.1) and return all
+ // the values that are equal or less than the specified version_id
+ // (including the value with the absent <version>). In a sense, absent
+ // <version> is treated as a 0 semver-like version.
+ //
+ // If no value is found then repeat the above process for every like_ids
+ // entry (from left to right) instead of name_id with version_id equal 0.
+ //
+ // If still no value is found, then return empty list (in which case the
+ // caller may choose to fallback to the downstream package name or do
+ // something more elaborate, like translate version_id to one of the
+ // like_id's version and try that).
+ //
+ // Note that multiple -name values per same distribution can be returned
+ // as, for example, for the following distribution values:
+ //
+ // debian_10-name: libcurl4 libcurl4-doc libcurl4-openssl-dev
+ // debian_10-name: libcurl3-gnutls libcurl4-gnutls-dev (yes, 3 and 4)
+ //
+ // The <distribution> value in the <name>_0 form is the special "non-
+ // native" name mapping. If the native argument is false, then such a
+ // mapping is preferred over any other mapping. If it is true, then such a
+ // mapping is ignored. The purpose of this special value is to allow
+ // specifying different package names for production compared to
+ // consumption. Note, however, that such a deviation may make it
+ // impossible to use native and non-native binary packages
+ // interchangeably, for example, to satisfy dependencies.
+ //
+ // Note also that the values are returned in the "override order", that is
+ // from the newest package version to oldest and then from the highest
+ // distribution version to lowest.
+ //
+ static strings
+ system_package_names (const available_packages&,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids,
+ bool native);
+
+ // Given the available package and the repository fragment it belongs to,
+ // return the system package version as mapped by one of the
+ // <distribution>-version values.
+ //
+ // The rest of the arguments as well as the overall semantics are the same
+ // as in system_package_names() above. That is, first consider
+ // <distribution>-version values corresponding to name_id. If none match,
+ // then repeat the above process for every like_ids entry with version_id
+ // equal 0. If still no match, then return nullopt (in which case the
+ // caller may choose to fallback to the upstream/bpkg package version or
+ // do something more elaborate).
+ //
+ // Note that lazy_shared_ptr<repository_fragment> is used only for
+ // diagnostics and conveys the database the available package object
+ // belongs to.
+ //
+ static optional<string>
+ system_package_version (const shared_ptr<available_package>&,
+ const lazy_shared_ptr<repository_fragment>&,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids);
+
+ // Given the system package version and available packages (as returned by
+ // find_available_all()) return the downstream package version as mapped
+ // by one of the <distribution>-to-downstream-version values.
+ //
+ // The rest of the arguments as well as the overall semantics are the same
+ // as in system_package_names() above. That is, first consider
+ // <distribution>-to-downstream-version values corresponding to
+ // name_id. If none match, then repeat the above process for every
+ // like_ids entry with version_id equal 0. If still no match, then return
+ // nullopt (in which case the caller may choose to fallback to the system
+ // package version or do something more elaborate).
+ //
+ static optional<version>
+ downstream_package_version (const string& system_version,
+ const available_packages&,
+ const string& name_id,
+ const string& version_id,
+ const vector<string>& like_ids);
+
+ // Return the map of filesystem entries (files and symlinks) that would be
+ // installed for the specified packages with the specified configuration
+ // variables.
+ //
+ // In essence, this function runs:
+ //
+ // b --dry-run --quiet <vars> !config.install.scope=<scope>
+ // !config.install.manifest=- install: <pkgs>
+ //
+ // And converts the printed installation manifest into the path map.
+ //
+ // Note that this function prints an appropriate progress indicator since
+ // even in the dry-run mode it may take some time (see the --dry-run
+ // option documentation for details).
+ //
+ struct installed_entry
+ {
+ string mode; // Empty if symlink.
+ const pair<const path, installed_entry>* target; // Target if symlink.
+ };
+
+ class installed_entry_map: public butl::path_map<installed_entry>
+ {
+ public:
+ // Return true if there are filesystem entries in the specified
+ // directory or its subdirectories.
+ //
+ bool
+ contains_sub (const dir_path& d)
+ {
+ auto p (find_sub (d));
+ return p.first != p.second;
+ }
+ };
+
+ installed_entry_map
+ installed_entries (const common_options&,
+ const packages& pkgs,
+ const strings& vars,
+ const string& scope);
+
+ protected:
+ optional<bool> progress_; // --[no]-progress (see also stderr_term)
+ optional<size_t> fetch_timeout_; // --fetch-timeout
+
+ // The --sys-* option values.
+ //
+ bool install_;
+ bool fetch_;
+ bool yes_;
+ string sudo_;
+ };
+
+ // Create a package manager instance corresponding to the specified host
+ // target triplet as well as optional distribution package manager name and
+ // architecture. If name is empty, return NULL if there is no support for
+ // this platform. If architecture is empty, then derive it automatically
+ // from the host target triplet. Currently recognized names:
+ //
+ // debian -- Debian and alike (Ubuntu, etc) using the APT frontend.
+ // fedora -- Fedora and alike (RHEL, Centos, etc) using the DNF frontend.
+ // archive -- Installation archive, any platform, production only.
+ //
+ // Note: the name can be used to select an alternative package manager
+ // implementation on platforms that support multiple.
+ //
+ unique_ptr<system_package_manager>
+ make_consumption_system_package_manager (const common_options&,
+ const target_triplet&,
+ const string& name,
+ const string& arch,
+ bool install,
+ bool fetch,
+ bool yes,
+ const string& sudo);
+
+ // Create for production. The second half of the result is the effective
+ // distribution name.
+ //
+ // Note that the reference to options is expected to outlive the returned
+ // instance.
+ //
+ class pkg_bindist_options;
+
+ pair<unique_ptr<system_package_manager>, string>
+ make_production_system_package_manager (const pkg_bindist_options&,
+ const target_triplet&,
+ const string& name,
+ const string& arch);
+}
+
+#endif // BPKG_SYSTEM_PACKAGE_MANAGER_HXX
diff --git a/bpkg/system-package-manager.test.cxx b/bpkg/system-package-manager.test.cxx
new file mode 100644
index 0000000..f0d7c8f
--- /dev/null
+++ b/bpkg/system-package-manager.test.cxx
@@ -0,0 +1,160 @@
+// file : bpkg/system-package-manager.test.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <bpkg/system-package-manager.hxx>
+
+#include <iostream>
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#undef NDEBUG
+#include <cassert>
+
+#include <bpkg/system-package-manager.test.hxx>
+
+using namespace std;
+
+namespace bpkg
+{
+ // Usage: args[0] <command> ...
+ //
+ // Where <command> is one of:
+ //
+ // system-package-names <name-id> <ver-id> [<like-id>...] -- [--non-native] <pkg> <file>...
+ //
+ // Where <pkg> is a package name, <file> is a package manifest file.
+ //
+ // system-package-version <name-id> <ver-id> [<like-id>...] -- <pkg> <file>
+ //
+ // Where <pkg> is a package name, <file> is a package manifest file.
+ //
+ // downstream-package-version <name-id> <ver-id> [<like-id>...] -- <ver> <pkg> <file>...
+ //
+ // Where <ver> is a system version to translate, <pkg> is a package
+ // name, and <file> is a package manifest file.
+ //
+ int
+ main (int argc, char* argv[])
+ try
+ {
+ assert (argc >= 2); // <command>
+
+ int argi (1);
+ string cmd (argv[argi++]);
+
+ os_release osr;
+ if (cmd == "system-package-names" ||
+ cmd == "system-package-version" ||
+ cmd == "downstream-package-version")
+ {
+ assert (argc >= 4); // <name-id> <ver-id>
+
+ osr.name_id = argv[argi++];
+ osr.version_id = argv[argi++];
+
+ for (; argi != argc; ++argi)
+ {
+ string a (argv[argi]);
+
+ if (a == "--")
+ break;
+
+ osr.like_ids.push_back (move (a));
+ }
+ }
+
+ if (cmd == "system-package-names")
+ {
+ assert (argi != argc); // --
+ string a (argv[argi++]);
+ assert (a == "--");
+
+ assert (argi != argc);
+ bool native (true);
+ if ((a = argv[argi]) == "--non-native")
+ {
+ native = false;
+ argi++;
+ }
+
+ assert (argi != argc); // <pkg>
+ string pn (argv[argi++]);
+
+ assert (argi != argc); // <file>
+ available_packages aps;
+ for (; argi != argc; ++argi)
+ aps.push_back (make_available_from_manifest (pn, argv[argi]));
+ sort_available (aps);
+
+ strings ns (
+ system_package_manager::system_package_names (
+ aps, osr.name_id, osr.version_id, osr.like_ids, native));
+
+ for (const string& n: ns)
+ cout << n << '\n';
+ }
+ else if (cmd == "system-package-version")
+ {
+ assert (argi != argc); // --
+ string a (argv[argi++]);
+ assert (a == "--");
+
+ assert (argi != argc); // <pkg>
+ string pn (argv[argi++]);
+
+ assert (argi != argc); // <file>
+ pair<shared_ptr<available_package>,
+ lazy_shared_ptr<repository_fragment>> apf (
+ make_available_from_manifest (pn, argv[argi++]));
+
+ assert (argi == argc); // No trailing junk.
+
+ if (optional<string> v =
+ system_package_manager::system_package_version (
+ apf.first, apf.second, osr.name_id, osr.version_id, osr.like_ids))
+ {
+ cout << *v << '\n';
+ }
+ }
+ else if (cmd == "downstream-package-version")
+ {
+ assert (argi != argc); // --
+ string a (argv[argi++]);
+ assert (a == "--");
+
+ assert (argi != argc); // <ver>
+ string sv (argv[argi++]);
+
+ assert (argi != argc); // <pkg>
+ string pn (argv[argi++]);
+
+ assert (argi != argc); // <file>
+ available_packages aps;
+ for (; argi != argc; ++argi)
+ aps.push_back (make_available_from_manifest (pn, argv[argi]));
+ sort_available (aps);
+
+ optional<version> v (
+ system_package_manager::downstream_package_version (
+ sv, aps, osr.name_id, osr.version_id, osr.like_ids));
+
+ if (v)
+ cout << *v << '\n';
+ }
+ else
+ fail << "unknown command '" << cmd << "'";
+
+ return 0;
+ }
+ catch (const failed&)
+ {
+ return 1;
+ }
+}
+
+int
+main (int argc, char* argv[])
+{
+ return bpkg::main (argc, argv);
+}
diff --git a/bpkg/system-package-manager.test.hxx b/bpkg/system-package-manager.test.hxx
new file mode 100644
index 0000000..688eb72
--- /dev/null
+++ b/bpkg/system-package-manager.test.hxx
@@ -0,0 +1,112 @@
+// file : bpkg/system-package-manager.test.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BPKG_SYSTEM_PACKAGE_MANAGER_TEST_HXX
+#define BPKG_SYSTEM_PACKAGE_MANAGER_TEST_HXX
+
+#include <bpkg/system-package-manager.hxx>
+
+#include <algorithm> // sort()
+
+#include <bpkg/types.hxx>
+#include <bpkg/utility.hxx>
+
+#include <libbutl/manifest-parser.hxx>
+
+#include <libbpkg/manifest.hxx>
+
+#include <bpkg/package.hxx>
+
+namespace bpkg
+{
+ // Parse the manifest as if it comes from a git repository with a single
+ // package and make an available package out of it. If the file name is
+ // `-` then read from stdin. If the package name is empty, then take the
+ // name from the manifest. Otherwise, assert they match.
+ //
+ inline
+ pair<shared_ptr<available_package>, lazy_shared_ptr<repository_fragment>>
+ make_available_from_manifest (const string& pn, const string& f)
+ {
+ using butl::manifest_parser;
+ using butl::manifest_parsing;
+
+ path fp (f);
+ path_name fn (fp);
+
+ try
+ {
+ ifdstream ifds;
+ istream& ifs (butl::open_file_or_stdin (fn, ifds));
+
+ manifest_parser mp (ifs, fn.name ? *fn.name : fn.path->string ());
+
+ package_manifest m (mp,
+ false /* ignore_unknown */,
+ true /* complete_values */);
+
+ const string& n (m.name.string ());
+ assert (pn.empty () || n == pn);
+
+ m.alt_naming = false;
+ m.bootstrap_build = "project = " + n + '\n';
+
+ shared_ptr<available_package> ap (
+ make_shared<available_package> (move (m)));
+
+ lazy_shared_ptr<repository_fragment> af (
+ make_shared<repository_fragment> (
+ repository_location ("https://example.com/" + n,
+ repository_type::git)));
+
+ ap->locations.push_back (package_location {af, current_dir});
+
+ return make_pair (move (ap), move (af));
+ }
+ catch (const manifest_parsing& e)
+ {
+ fail (e.name, e.line, e.column) << e.description << endf;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << fn << ": " << e << endf;
+ }
+ }
+
+ // Make an available stub package as if it comes from a git repository with
+ // a single package.
+ //
+ inline
+ pair<shared_ptr<available_package>, lazy_shared_ptr<repository_fragment>>
+ make_available_stub (const string& n)
+ {
+ shared_ptr<available_package> ap (
+ make_shared<available_package> (package_name (n)));
+
+ lazy_shared_ptr<repository_fragment> af (
+ make_shared<repository_fragment> (
+ repository_location ("https://example.com/" + n,
+ repository_type::git)));
+
+ ap->locations.push_back (package_location {af, current_dir});
+
+ return make_pair (move (ap), move (af));
+ }
+
+ // Sort available packages in the version descending order.
+ //
+ inline void
+ sort_available (available_packages& aps)
+ {
+ using element_type =
+ pair<shared_ptr<available_package>, lazy_shared_ptr<repository_fragment>>;
+
+ std::sort (aps.begin (), aps.end (),
+ [] (const element_type& x, const element_type& y)
+ {
+ return x.first->version > y.first->version;
+ });
+ }
+}
+
+#endif // BPKG_SYSTEM_PACKAGE_MANAGER_TEST_HXX
diff --git a/bpkg/system-package-manager.test.testscript b/bpkg/system-package-manager.test.testscript
new file mode 100644
index 0000000..74c6ad2
--- /dev/null
+++ b/bpkg/system-package-manager.test.testscript
@@ -0,0 +1,158 @@
+# file : bpkg/system-package-manager.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: system-package-names
+:
+{
+ test.arguments += system-package-names
+
+ : basics
+ :
+ cat <<EOI >=libcurl7.64.manifest;
+ : 1
+ name: libcurl
+ version: 7.64.0
+ debian-name: libcurl2 libcurl2-dev
+ summary: curl
+ license: curl
+ EOI
+ cat <<EOI >=libcurl7.84.manifest;
+ : 1
+ name: libcurl
+ version: 7.84.0
+ debian_9-name: libcurl2 libcurl2-dev libcurl2-doc
+ debian_10-name: libcurl4 libcurl4-openssl-dev
+ debian_10-name: libcurl3-gnutls libcurl4-gnutls-dev
+ summary: curl
+ license: curl
+ EOI
+
+ $* debian 10 -- libcurl libcurl7.64.manifest libcurl7.84.manifest >>EOO;
+ libcurl4 libcurl4-openssl-dev
+ libcurl3-gnutls libcurl4-gnutls-dev
+ libcurl2 libcurl2-dev libcurl2-doc
+ libcurl2 libcurl2-dev
+ EOO
+ $* debian 9 -- libcurl libcurl7.64.manifest libcurl7.84.manifest >>EOO;
+ libcurl2 libcurl2-dev libcurl2-doc
+ libcurl2 libcurl2-dev
+ EOO
+ $* debian '' -- libcurl libcurl7.64.manifest libcurl7.84.manifest >>EOO;
+ libcurl2 libcurl2-dev
+ EOO
+ $* ubuntu 16.04 debian -- libcurl libcurl7.64.manifest libcurl7.84.manifest >>EOO
+ libcurl2 libcurl2-dev
+ EOO
+
+ : native
+ :
+ cat <<EOI >=libcurl.manifest;
+ : 1
+ name: libcurl
+ version: 7.84.0
+ debian-name: libcurl4 libcurl4-openssl-dev
+ debian_0-name: libcurl libcurl-dev
+ summary: curl
+ license: curl
+ EOI
+ $* debian 10 -- libcurl libcurl.manifest >>EOO;
+ libcurl4 libcurl4-openssl-dev
+ EOO
+ $* debian 10 -- --non-native libcurl libcurl.manifest >>EOO
+ libcurl libcurl-dev
+ EOO
+}
+
+: system-package-version
+:
+{
+ test.arguments += system-package-version
+
+ : basics
+ :
+ cat <<EOI >=libssl1.1.1+19.manifest;
+ : 1
+ name: libssl
+ version: 1.1.1+19
+ fedora-name: openssl-libs
+ fedora-version: 1:1.1.1q-1
+ fedora_35-version: 1:1.1.1q-1.fc35
+ fedora_36-version: 1:1.1.1q-1.fc36
+ summary: openssl
+ license: openssl
+ EOI
+
+ $* fedora 34 -- libssl libssl1.1.1+19.manifest >>EOO;
+ 1:1.1.1q-1
+ EOO
+ $* fedora 35 -- libssl libssl1.1.1+19.manifest >>EOO;
+ 1:1.1.1q-1.fc35
+ EOO
+ $* fedora 36 -- libssl libssl1.1.1+19.manifest >>EOO;
+ 1:1.1.1q-1.fc36
+ EOO
+ $* fedora 37 -- libssl libssl1.1.1+19.manifest >>EOO;
+ 1:1.1.1q-1.fc36
+ EOO
+ $* fedora '' -- libssl libssl1.1.1+19.manifest >>EOO;
+ 1:1.1.1q-1
+ EOO
+ $* rhel 7.8 fedora -- libssl libssl1.1.1+19.manifest >>EOO
+ 1:1.1.1q-1
+ EOO
+}
+
+: downstream-package-version
+:
+{
+ test.arguments += downstream-package-version
+
+ : basics
+ :
+ cat <<EOI >=libssl1.manifest;
+ : 1
+ name: libssl
+ version: 1.1.1
+ upstream-version: 1.1.1n
+ debian-to-downstream-version: /1\.1\.1[a-z]/1.1.1/
+ summary: openssl
+ license: openssl
+ EOI
+ cat <<EOI >=libssl3.manifest;
+ : 1
+ name: libssl
+ version: 3.0.0
+ debian-to-downstream-version: /([3-9])\.([0-9]+)\.([0-9]+)/\1.\2.\3/
+ summary: openssl
+ license: openssl
+ EOI
+ $* debian 10 -- 1.1.1l libssl libssl1.manifest libssl3.manifest >'1.1.1';
+ $* debian 10 -- 3.0.7 libssl libssl1.manifest libssl3.manifest >'3.0.7';
+ $* debian '' -- 1.1.1l libssl libssl1.manifest libssl3.manifest >'1.1.1';
+ $* debian '' -- 3.0.7 libssl libssl1.manifest libssl3.manifest >'3.0.7';
+ $* ubuntu 16.04 debian -- 1.1.1l libssl libssl1.manifest libssl3.manifest >'1.1.1';
+ $* ubuntu 16.05 debian -- 3.0.7 libssl libssl1.manifest libssl3.manifest >'3.0.7'
+
+ : order
+ :
+ cat <<EOI >=libssl1.manifest;
+ : 1
+ name: libssl
+ version: 1.1.1
+ debian-to-downstream-version: /.*/0/
+ summary: openssl
+ license: openssl
+ EOI
+ cat <<EOI >=libssl3.manifest;
+ : 1
+ name: libssl
+ version: 3.0.0
+ debian_9-to-downstream-version: /.*/9/
+ debian_10-to-downstream-version: /.*/10/
+ summary: openssl
+ license: openssl
+ EOI
+ $* debian 10 -- 1 libssl libssl1.manifest libssl3.manifest >'10';
+ $* debian 9 -- 1 libssl libssl1.manifest libssl3.manifest >'9';
+ $* debian 8 -- 1 libssl libssl1.manifest libssl3.manifest >'0'
+}
diff --git a/bpkg/system-repository.cxx b/bpkg/system-repository.cxx
index de4e61e..c308ddb 100644
--- a/bpkg/system-repository.cxx
+++ b/bpkg/system-repository.cxx
@@ -5,12 +5,13 @@
namespace bpkg
{
- system_repository_type system_repository;
-
- const version& system_repository_type::
- insert (const package_name& name, const version& v, bool authoritative)
+ const version& system_repository::
+ insert (const package_name& name,
+ const version& v,
+ bool authoritative,
+ const system_package_status* s)
{
- auto p (map_.emplace (name, system_package {v, authoritative}));
+ auto p (map_.emplace (name, system_package {v, authoritative, s}));
if (!p.second)
{
@@ -24,6 +25,7 @@ namespace bpkg
{
sp.authoritative = authoritative;
sp.version = v;
+ sp.system_status = s;
}
}
diff --git a/bpkg/system-repository.hxx b/bpkg/system-repository.hxx
index 1168ec0..d524ee4 100644
--- a/bpkg/system-repository.hxx
+++ b/bpkg/system-repository.hxx
@@ -14,9 +14,11 @@
namespace bpkg
{
+ struct system_package_status; // <bpkg/system-package-manager.hxx>
+
// A map of discovered system package versions. The information can be
// authoritative (i.e., it was provided by the user or auto-discovered
- // on this run) or non-authoritative (i.e., comes from selected_packages
+ // on this run) or non-authoritative (i.e., comes from selected packages
// that are present in the database; in a sence it was authoritative but
// on some previous run.
//
@@ -30,16 +32,25 @@ namespace bpkg
version_type version;
bool authoritative;
+
+ // If the information is authoritative then this member indicates whether
+ // the version came from the system package manager (not NULL) or
+ // user/fallback (NULL).
+ //
+ const system_package_status* system_status;
};
- class system_repository_type
+ class system_repository
{
public:
const version&
- insert (const package_name& name, const version&, bool authoritative);
+ insert (const package_name& name,
+ const version&,
+ bool authoritative,
+ const system_package_status* = nullptr);
const system_package*
- find (const package_name& name)
+ find (const package_name& name) const
{
auto i (map_.find (name));
return i != map_.end () ? &i->second : nullptr;
@@ -48,8 +59,6 @@ namespace bpkg
private:
std::map<package_name, system_package> map_;
};
-
- extern system_repository_type system_repository;
}
#endif // BPKG_SYSTEM_REPOSITORY_HXX
diff --git a/bpkg/types-parsers.cxx b/bpkg/types-parsers.cxx
index be95219..f23751d 100644
--- a/bpkg/types-parsers.cxx
+++ b/bpkg/types-parsers.cxx
@@ -3,6 +3,8 @@
#include <bpkg/types-parsers.hxx>
+#include <libbpkg/manifest.hxx>
+
namespace bpkg
{
namespace cli
@@ -67,6 +69,60 @@ namespace bpkg
parse_path (x, s);
}
+ void parser<uuid>::
+ parse (uuid& x, bool& xs, scanner& s)
+ {
+ xs = true;
+
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = uuid (v);
+
+ if (x.nil ())
+ throw invalid_value (o, v);
+ }
+ catch (const invalid_argument&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<butl::standard_version>::
+ parse (butl::standard_version& x, bool& xs, scanner& s)
+ {
+ using butl::standard_version;
+
+ xs = true;
+
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ // Note that we allow all kinds of versions, so that the caller can
+ // restrict them as they wish after the parsing.
+ //
+ x = standard_version (v,
+ standard_version::allow_earliest |
+ standard_version::allow_stub);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_value (o, v, e.what ());
+ }
+ }
+
void parser<auth>::
parse (auth& x, bool& xs, scanner& s)
{
@@ -87,6 +143,101 @@ namespace bpkg
throw invalid_value (o, v);
}
+ void parser<git_protocol_capabilities>::
+ parse (git_protocol_capabilities& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "dumb")
+ x = git_protocol_capabilities::dumb;
+ else if (v == "smart")
+ x = git_protocol_capabilities::smart;
+ else if (v == "unadv")
+ x = git_protocol_capabilities::unadv;
+ else
+ throw invalid_value (o, v);
+ }
+
+ void parser<git_capabilities_map>::
+ parse (git_capabilities_map& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ string v (s.next ());
+ size_t p (v.rfind ('='));
+
+ if (p == string::npos)
+ throw invalid_value (o, v);
+
+ string k (v, 0, p);
+
+ // Verify that the key is a valid remote git repository URL prefix.
+ //
+ try
+ {
+ repository_url u (k);
+
+ if (u.scheme == repository_protocol::file)
+ throw invalid_value (o, k, "local repository location");
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_value (o, k, e.what ());
+ }
+
+ // Parse the protocol capabilities value.
+ //
+ int ac (2);
+ char* av[] = {const_cast<char*> (o),
+ const_cast<char*> (v.c_str () + p + 1)};
+
+ argv_scanner vs (0, ac, av);
+
+ bool dummy;
+ parser<git_protocol_capabilities>::parse (x[k], dummy, vs);
+ }
+
+ void parser<git_capabilities_map>::
+ merge (git_capabilities_map& b, const git_capabilities_map& a)
+ {
+ for (const auto& o: a)
+ {
+ auto i (b.find (o.first));
+
+ if (i != b.end ())
+ i->second = o.second;
+ else
+ b.emplace (o.first, o.second);
+ }
+ }
+
+ void parser<stdout_format>::
+ parse (stdout_format& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "lines")
+ x = stdout_format::lines;
+ else if (v == "json")
+ x = stdout_format::json;
+ else
+ throw invalid_value (o, v);
+ }
+
void parser<repository_type>::
parse (repository_type& x, bool& xs, scanner& s)
{
diff --git a/bpkg/types-parsers.hxx b/bpkg/types-parsers.hxx
index 38b7cee..7bbb414 100644
--- a/bpkg/types-parsers.hxx
+++ b/bpkg/types-parsers.hxx
@@ -7,6 +7,8 @@
#ifndef BPKG_TYPES_PARSERS_HXX
#define BPKG_TYPES_PARSERS_HXX
+#include <libbutl/standard-version.hxx>
+
#include <libbpkg/manifest.hxx>
#include <bpkg/types.hxx>
@@ -49,6 +51,29 @@ namespace bpkg
};
template <>
+ struct parser<uuid>
+ {
+ static void
+ parse (uuid&, bool&, scanner&);
+
+ static void
+ merge (uuid& b, const uuid& a) {b = a;}
+ };
+
+ template <>
+ struct parser<butl::standard_version>
+ {
+ static void
+ parse (butl::standard_version&, bool&, scanner&);
+
+ static void
+ merge (butl::standard_version& b, const butl::standard_version& a)
+ {
+ b = a;
+ }
+ };
+
+ template <>
struct parser<auth>
{
static void
@@ -59,6 +84,39 @@ namespace bpkg
};
template <>
+ struct parser<git_protocol_capabilities>
+ {
+ static void
+ parse (git_protocol_capabilities&, bool&, scanner&);
+
+ static void
+ merge (git_protocol_capabilities& b, const git_protocol_capabilities& a)
+ {
+ b = a;
+ }
+ };
+
+ template <>
+ struct parser<git_capabilities_map>
+ {
+ static void
+ parse (git_capabilities_map&, bool&, scanner&);
+
+ static void
+ merge (git_capabilities_map&, const git_capabilities_map&);
+ };
+
+ template <>
+ struct parser<stdout_format>
+ {
+ static void
+ parse (stdout_format&, bool&, scanner&);
+
+ static void
+ merge (stdout_format& b, const stdout_format& a) {b = a;}
+ };
+
+ template <>
struct parser<repository_type>
{
static void
diff --git a/bpkg/types.hxx b/bpkg/types.hxx
index 65dba60..80e5a7d 100644
--- a/bpkg/types.hxx
+++ b/bpkg/types.hxx
@@ -21,15 +21,20 @@
#include <odb/lazy-ptr.hxx>
-#include <libbutl/url.mxx>
-#include <libbutl/path.mxx>
-#include <libbutl/process.mxx>
-#include <libbutl/utility.mxx> // icase_compare_string,
+#include <libbutl/b.hxx>
+#include <libbutl/url.hxx>
+#include <libbutl/path.hxx>
+#include <libbutl/uuid.hxx>
+#include <libbutl/uuid-io.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/process.hxx>
+#include <libbutl/utility.hxx> // icase_compare_string,
// compare_reference_target
-#include <libbutl/optional.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/small-vector.mxx>
-#include <libbutl/default-options.mxx>
+#include <libbutl/optional.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/small-vector.hxx>
+#include <libbutl/target-triplet.hxx>
+#include <libbutl/default-options.hxx>
namespace bpkg
{
@@ -53,7 +58,7 @@ namespace bpkg
using std::weak_ptr;
using std::vector;
- using butl::small_vector; // <libbutl/small-vector.mxx>
+ using butl::small_vector; // <libbutl/small-vector.hxx>
using strings = vector<string>;
using cstrings = vector<const char*>;
@@ -70,38 +75,45 @@ namespace bpkg
using std::system_error;
using io_error = std::ios_base::failure;
- // <libbutl/utility.mxx>
+ // <libbutl/utility.hxx>
//
using butl::icase_compare_string;
using butl::compare_reference_target;
- // <libbutl/optional.mxx>
+ // <libbutl/optional.hxx>
//
using butl::optional;
using butl::nullopt;
- // ODB smart pointers.
- //
- using odb::lazy_shared_ptr;
- using odb::lazy_weak_ptr;
-
- // <libbutl/path.mxx>
+ // <libbutl/path.hxx>
//
using butl::path;
+ using butl::path_name;
+ using butl::path_name_view;
using butl::dir_path;
using butl::basic_path;
using butl::invalid_path;
using butl::path_cast;
- using paths = std::vector<path>;
- using dir_paths = std::vector<dir_path>;
+ using paths = vector<path>;
+ using dir_paths = vector<dir_path>;
+
+ // <libbutl/uuid.hxx>
+ //
+ using butl::uuid;
- // <libbutl/url.mxx>
+ // <libbutl/url.hxx>
//
using butl::url;
- // <libbutl/process.mxx>
+ // <libbutl/sha256.hxx>
+ //
+ using butl::sha256;
+ using butl::sha256_to_fingerprint;
+ using butl::fingerprint_to_sha256;
+
+ // <libbutl/process.hxx>
//
using butl::process;
using butl::process_env;
@@ -109,7 +121,7 @@ namespace bpkg
using butl::process_exit;
using butl::process_error;
- // <libbutl/fdstream.mxx>
+ // <libbutl/fdstream.hxx>
//
using butl::auto_fd;
using butl::nullfd;
@@ -118,11 +130,94 @@ namespace bpkg
using butl::ofdstream;
using butl::fdstream_mode;
- // <libbutl/default-options.mxx>
+ // <libbutl/target-triplet.hxx>
+ //
+ using butl::target_triplet;
+
+ // <libbutl/default-options.hxx>
//
using butl::default_options_files;
using butl::default_options_entry;
using butl::default_options;
+
+ // <libbutl/b.hxx>
+ //
+ using package_info = butl::b_project_info;
+
+ // Derive from ODB smart pointers to return derived database (note that the
+ // database() functions are defined in database.hxx).
+ //
+ class database;
+
+ template <class T>
+ class lazy_shared_ptr: public odb::lazy_shared_ptr<T>
+ {
+ public:
+ using base_type = odb::lazy_shared_ptr<T>;
+
+ using base_type::base_type;
+
+ explicit
+ lazy_shared_ptr (base_type&& p): base_type (move (p)) {}
+
+ lazy_shared_ptr () = default;
+
+ bpkg::database&
+ database () const;
+ };
+
+ template <class T>
+ class lazy_weak_ptr: public odb::lazy_weak_ptr<T>
+ {
+ public:
+ using base_type = odb::lazy_weak_ptr<T>;
+
+ using base_type::base_type;
+
+ bpkg::database&
+ database () const;
+
+ lazy_shared_ptr<T>
+ lock () const
+ {
+ return lazy_shared_ptr<T> (base_type::lock ());
+ }
+ };
+
+ struct compare_lazy_ptr
+ {
+ template <typename P>
+ bool
+ operator() (const P& x, const P& y) const
+ {
+ // See operator==(database, database).
+ //
+ return x.object_id () != y.object_id ()
+ ? (x.object_id () < y.object_id ())
+ : less (static_cast<typename P::base_type> (x).database (),
+ static_cast<typename P::base_type> (y).database ());
+ }
+
+ private:
+ // Defined in database.cxx.
+ //
+ bool
+ less (const odb::database&, const odb::database&) const;
+ };
+
+ // Compare two lazy pointers via the pointed-to object ids.
+ //
+ struct compare_lazy_ptr_id
+ {
+ template <typename P>
+ bool
+ operator() (const P& x, const P& y) const
+ {
+ // Note: ignoring database is intentional.
+ //
+ return x.object_id () < y.object_id ();
+ }
+ };
}
// In order to be found (via ADL) these have to be either in std:: or in
@@ -140,6 +235,14 @@ namespace std
::butl::path::traits_type::canonicalize (r);
return os << r;
}
+
+ inline ostream&
+ operator<< (ostream& os, const ::butl::path_name_view& v)
+ {
+ assert (!v.empty ());
+
+ return v.name != nullptr && *v.name ? (os << **v.name) : (os << *v.path);
+ }
}
#endif // BPKG_TYPES_HXX
diff --git a/bpkg/utility.cxx b/bpkg/utility.cxx
index ef61870..d084b76 100644
--- a/bpkg/utility.cxx
+++ b/bpkg/utility.cxx
@@ -3,8 +3,8 @@
#include <bpkg/utility.hxx>
-#include <libbutl/prompt.mxx>
-#include <libbutl/fdstream.mxx>
+#include <libbutl/prompt.hxx>
+#include <libbutl/fdstream.hxx>
#include <bpkg/diagnostics.hxx>
#include <bpkg/common-options.hxx>
@@ -26,22 +26,48 @@ namespace bpkg
const dir_path certs_dir (dir_path (bpkg_dir) /= "certs");
const dir_path repos_dir (dir_path (bpkg_dir) /= "repos");
+ // Standard and alternative build file/directory naming schemes.
+ //
+ // build:
+ //
+ const dir_path std_build_dir ("build");
+ const dir_path std_config_dir (dir_path (std_build_dir) /= "config");
+ const path std_bootstrap_file (dir_path (std_build_dir) /= "bootstrap.build");
+ const path std_root_file (dir_path (std_build_dir) /= "root.build");
+ const string std_build_ext ("build");
+
+ // build2:
+ //
+ const dir_path alt_build_dir ("build2");
+ const dir_path alt_config_dir (dir_path (alt_build_dir) /= "config");
+ const path alt_bootstrap_file (dir_path (alt_build_dir) /= "bootstrap.build2");
+ const path alt_root_file (dir_path (alt_build_dir) /= "root.build2");
+ const string alt_build_ext ("build2");
+
const dir_path current_dir (".");
- dir_path temp_dir;
+ const target_triplet host_triplet (BPKG_HOST_TRIPLET);
+
+ map<dir_path, dir_path> tmp_dirs;
+
+ bool keep_tmp;
auto_rmfile
- tmp_file (const string& p)
+ tmp_file (const dir_path& cfg, const string& p)
{
- assert (!temp_dir.empty ());
- return auto_rmfile (temp_dir / path::traits_type::temp_name (p));
+ auto i (tmp_dirs.find (cfg));
+ assert (i != tmp_dirs.end ());
+ return auto_rmfile (i->second / path::traits_type::temp_name (p),
+ !keep_tmp);
}
auto_rmdir
- tmp_dir (const string& p)
+ tmp_dir (const dir_path& cfg, const string& p)
{
- assert (!temp_dir.empty ());
- return auto_rmdir (temp_dir / dir_path (path::traits_type::temp_name (p)));
+ auto i (tmp_dirs.find (cfg));
+ assert (i != tmp_dirs.end ());
+ return auto_rmdir (i->second / dir_path (path::traits_type::temp_name (p)),
+ !keep_tmp);
}
void
@@ -62,21 +88,26 @@ namespace bpkg
mk (d); // We shouldn't need mk_p().
- temp_dir = move (d);
+ tmp_dirs[cfg] = move (d);
}
void
clean_tmp (bool ignore_error)
{
- if (!temp_dir.empty () && exists (temp_dir))
+ for (const auto& d: tmp_dirs)
{
- rm_r (temp_dir,
- true /* dir_itself */,
- 3,
- ignore_error ? rm_error_mode::ignore : rm_error_mode::fail);
-
- temp_dir.clear ();
+ const dir_path& td (d.second);
+
+ if (exists (td))
+ {
+ rm_r (td,
+ true /* dir_itself */,
+ 3,
+ ignore_error ? rm_error_mode::ignore : rm_error_mode::fail);
+ }
}
+
+ tmp_dirs.clear ();
}
path&
@@ -84,7 +115,8 @@ namespace bpkg
{
try
{
- f.complete ().normalize ();
+ if (!f.complete ().normalized ())
+ f.normalize ();
}
catch (const invalid_path& e)
{
@@ -103,7 +135,8 @@ namespace bpkg
{
try
{
- d.complete ().normalize ();
+ if (!d.complete ().normalized ())
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -117,7 +150,21 @@ namespace bpkg
return d;
}
- bool stderr_term;
+ dir_path
+ current_directory ()
+ {
+ try
+ {
+ return dir_path::current_directory ();
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain current directory: " << e << endf;
+ }
+ }
+
+ optional<const char*> stderr_term = nullopt;
+ bool stderr_term_color = false;
bool
yn_prompt (const string& p, char d)
@@ -242,8 +289,8 @@ namespace bpkg
}
}
- void
- mv (const dir_path& from, const dir_path& to)
+ bool
+ mv (const dir_path& from, const dir_path& to, bool ie)
{
if (verb >= 3)
text << "mv " << from << ' ' << to; // Prints trailing slashes.
@@ -254,8 +301,40 @@ namespace bpkg
}
catch (const system_error& e)
{
- fail << "unable to move directory " << from << " to " << to << ": " << e;
+ error << "unable to move directory " << from << " to " << to << ": "
+ << e;
+
+ if (ie)
+ return false;
+
+ throw failed ();
}
+
+ return true;
+ }
+
+ bool
+ mv (const path& from, const path& to, bool ie)
+ {
+ if (verb >= 3)
+ text << "mv " << from << ' ' << to;
+
+ try
+ {
+ mvfile (from, to,
+ cpflags::overwrite_content | cpflags::overwrite_permissions);
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to move file " << from << " to " << to << ": " << e;
+
+ if (ie)
+ return false;
+
+ throw failed ();
+ }
+
+ return true;
}
dir_path
@@ -313,4 +392,43 @@ namespace bpkg
? co.build ().string ().c_str ()
: BPKG_EXE_PREFIX "b" BPKG_EXE_SUFFIX;
}
+
+ process_path
+ search_b (const common_options& co)
+ {
+ const char* b (name_b (co));
+
+ try
+ {
+ // Use our executable directory as a fallback search since normally the
+ // entire toolchain is installed into one directory. This way, for
+ // example, if we installed into /opt/build2 and run bpkg with absolute
+ // path (and without PATH), then bpkg will be able to find "its" b.
+ //
+ return process::path_search (b, true /* init */, exec_dir);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to execute " << b << ": " << e << endf;
+ }
+ }
+
+ void
+ dump_stderr (auto_fd&& fd)
+ {
+ ifdstream is (move (fd), fdstream_mode::skip, ifdstream::badbit);
+
+ // We could probably write something like this, instead:
+ //
+ // *diag_stream << is.rdbuf () << flush;
+ //
+ // However, it would never throw and we could potentially miss the reading
+ // failure, unless we decide to additionally mess with the diagnostics
+ // stream exception mask.
+ //
+ for (string l; !eof (getline (is, l)); )
+ *diag_stream << l << endl;
+
+ is.close ();
+ }
}
diff --git a/bpkg/utility.hxx b/bpkg/utility.hxx
index 4360118..7a51948 100644
--- a/bpkg/utility.hxx
+++ b/bpkg/utility.hxx
@@ -4,20 +4,21 @@
#ifndef BPKG_UTILITY_HXX
#define BPKG_UTILITY_HXX
+#include <map>
#include <memory> // make_shared()
#include <string> // to_string()
#include <cstring> // strcmp(), strchr()
#include <utility> // move(), forward(), declval(), make_pair()
#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
+#include <iterator> // make_move_iterator(), back_inserter()
#include <algorithm> // *
#include <libbutl/ft/lang.hxx>
-#include <libbutl/utility.mxx> // icasecmp(), reverse_iterate(), etc
-#include <libbutl/process.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/default-options.mxx>
+#include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(), etc
+#include <libbutl/process.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/default-options.hxx>
#include <bpkg/types.hxx>
#include <bpkg/version.hxx>
@@ -32,16 +33,27 @@ namespace bpkg
using std::make_pair;
using std::make_shared;
using std::make_move_iterator;
+ using std::back_inserter;
using std::to_string;
using std::strcmp;
using std::strchr;
- // <libbutl/utility.mxx>
+ // <libbutl/utility.hxx>
//
using butl::icasecmp;
using butl::reverse_iterate;
+ using butl::alpha;
+ using butl::alnum;
+ using butl::digit;
+ using butl::xdigit;
+
+ using butl::trim;
+ using butl::trim_left;
+ using butl::trim_right;
+ using butl::next_word;
+
using butl::make_guard;
using butl::make_exception_guard;
@@ -49,16 +61,19 @@ namespace bpkg
using butl::setenv;
using butl::unsetenv;
- // <libbutl/process.mxx>
+ using butl::eof;
+
+ // <libbutl/process.hxx>
//
using butl::process_start_callback;
+ using butl::process_print_callback;
- // <libbutl/filesystem.mxx>
+ // <libbutl/filesystem.hxx>
//
using butl::auto_rmfile;
using butl::auto_rmdir;
- // <libbutl/default-options.mxx>
+ // <libbutl/default-options.hxx>
//
using butl::load_default_options;
using butl::merge_default_options;
@@ -71,26 +86,46 @@ namespace bpkg
// Widely-used paths.
//
- extern const dir_path bpkg_dir; // .bpkg/
- extern const dir_path certs_dir; // .bpkg/certs/
- extern const dir_path repos_dir; // .bpkg/repos/
- extern const dir_path current_dir; // ./
+ extern const dir_path bpkg_dir; // .bpkg/
+ extern const dir_path certs_dir; // .bpkg/certs/
+ extern const dir_path repos_dir; // .bpkg/repos/
+
+ extern const dir_path std_build_dir; // build/
+ extern const dir_path std_config_dir; // build/config/
+ extern const path std_bootstrap_file; // build/bootstrap.build
+ extern const path std_root_file; // build/root.build
+ extern const string std_build_ext; // build
+
+ extern const dir_path alt_build_dir; // build2/
+ extern const dir_path alt_config_dir; // build2/config/
+ extern const path alt_bootstrap_file; // build2/bootstrap.build2
+ extern const path alt_root_file; // build2/root.build2
+ extern const string alt_build_ext; // build2
+
+ extern const dir_path current_dir; // ./
+
+ // Host target triplet for which we were built.
+ //
+ extern const target_triplet host_triplet;
// Temporary directory facility.
//
- // This is normally .bpkg/tmp/ but can also be some system-wide directory
- // (e.g., /tmp/bpkg-XXX/) if there is no bpkg configuration. This directory
- // is automatically created and cleaned up for most commands in main() so
- // you don't need to call init_tmp() explicitly except for certain special
- // commands (like cfg-create).
+ // An entry normally maps <cfg-dir> to <cfg-dir>/.bpkg/tmp/ but can also map
+ // an empty directory to some system-wide directory (e.g., /tmp/bpkg-XXX/)
+ // if there is no bpkg configuration. The temporary directory for the
+ // current configuration is automatically created and cleaned up for most
+ // commands in main(), so you don't need to call init_tmp() explicitly
+ // except for certain special commands (like cfg-create).
//
- extern dir_path temp_dir;
+ extern std::map<dir_path, dir_path> tmp_dirs;
+
+ extern bool keep_tmp; // --keep-tmp
auto_rmfile
- tmp_file (const string& prefix);
+ tmp_file (const dir_path& cfg, const string& prefix);
auto_rmdir
- tmp_dir (const string& prefix);
+ tmp_dir (const dir_path& cfg, const string& prefix);
void
init_tmp (const dir_path& cfg);
@@ -123,9 +158,20 @@ namespace bpkg
return move (normalize (r, what));
}
- // Progress.
+ dir_path
+ current_directory ();
+
+ // Diagnostics.
+ //
+ // If stderr is not a terminal, then the value is absent (so can be used as
+ // bool). Otherwise, it is the value of the TERM environment variable (which
+ // can be NULL).
+ //
+ extern optional<const char*> stderr_term;
+
+ // True if the color can be used on the stderr terminal.
//
- extern bool stderr_term; // True if stderr is a terminal.
+ extern bool stderr_term_color;
// Y/N prompt. See butl::yn_prompt() for details (this is a thin wrapper).
//
@@ -163,8 +209,13 @@ namespace bpkg
uint16_t verbosity = 3,
rm_error_mode = rm_error_mode::fail);
- void
- mv (const dir_path& from, const dir_path& to);
+ // Note that if ignore_error is true, the diagnostics is still issued.
+ //
+ bool
+ mv (const dir_path& from, const dir_path& to, bool ignore_errors = false);
+
+ bool
+ mv (const path& from, const path& to, bool ignore_errors = false);
// Set (with diagnostics at verbosity level 3 or higher) the new and return
// the previous working directory.
@@ -185,11 +236,8 @@ namespace bpkg
//
extern dir_path exec_dir;
- // Run build2, mapping verbosity levels. If quiet is true, then run build2
- // quiet if our verbosity level is 1.
+ // Run build2, mapping verbosity levels.
//
- const char*
- name_b (const common_options&);
// Verbosity level 1 mapping.
//
@@ -200,6 +248,20 @@ namespace bpkg
normal // Run normally (at verbosity 1).
};
+ template <typename V>
+ void
+ map_verb_b (const common_options&, verb_b, V& args, string& verb_arg);
+
+ const char*
+ name_b (const common_options&);
+
+ process_path
+ search_b (const common_options&);
+
+ template <typename... A>
+ void
+ print_b (const common_options&, verb_b, A&&... args);
+
template <typename O, typename E, typename... A>
process
start_b (const common_options&, O&& out, E&& err, verb_b, A&&... args);
@@ -207,6 +269,12 @@ namespace bpkg
template <typename... A>
void
run_b (const common_options&, verb_b, A&&... args);
+
+ // Read out the data from the specified file descriptor and dump it to
+ // stderr. Throw io_error on the underlying OS errors.
+ //
+ void
+ dump_stderr (auto_fd&&);
}
#include <bpkg/utility.txx>
diff --git a/bpkg/utility.txx b/bpkg/utility.txx
index b2c2a3c..33bb711 100644
--- a/bpkg/utility.txx
+++ b/bpkg/utility.txx
@@ -7,6 +7,88 @@ namespace bpkg
{
// *_b()
//
+ template <typename V>
+ void
+ map_verb_b (const common_options& co, verb_b v, V& ops, string& verb_arg)
+ {
+ // Map verbosity level. If we are running quiet or at level 1,
+ // then run build2 quiet. Otherwise, run it at the same level
+ // as us.
+ //
+ bool progress (co.progress ());
+ bool no_progress (co.no_progress ());
+
+ if (verb == 0)
+ {
+ ops.push_back ("-q");
+ no_progress = false; // Already suppressed with -q.
+ }
+ else if (verb == 1)
+ {
+ // NOTE: search for verb_b usage if changing anything here.
+ //
+ if (v != verb_b::normal)
+ {
+ ops.push_back ("-q");
+
+ if (!no_progress)
+ {
+ if (v == verb_b::progress && stderr_term)
+ {
+ ops.push_back ("--progress");
+ progress = false; // The option is already added.
+ }
+ }
+ else
+ no_progress = false; // Already suppressed with -q.
+ }
+ }
+ else if (verb == 2)
+ ops.push_back ("-v");
+ else
+ {
+ verb_arg = to_string (verb);
+ ops.push_back ("--verbose");
+ ops.push_back (verb_arg.c_str ());
+ }
+
+ if (progress)
+ ops.push_back ("--progress");
+
+ if (no_progress)
+ ops.push_back ("--no-progress");
+ }
+
+ template <typename... A>
+ void
+ print_b (const common_options& co, verb_b v, A&&... args)
+ {
+ process_path pp (search_b (co));
+
+ small_vector<const char*, 1> ops;
+
+ // As in start_b() below.
+ //
+ string verb_arg;
+ map_verb_b (co, v, ops, verb_arg);
+
+ if (co.diag_color ())
+ ops.push_back ("--diag-color");
+
+ if (co.no_diag_color ())
+ ops.push_back ("--no-diag-color");
+
+ process_print_callback (
+ [] (const char* const args[], size_t n)
+ {
+ print_process (args, n);
+ },
+ pp,
+ ops,
+ co.build_option (),
+ forward<A> (args)...);
+ }
+
template <typename O, typename E, typename... A>
process
start_b (const common_options& co,
@@ -15,57 +97,27 @@ namespace bpkg
verb_b v,
A&&... args)
{
- const char* b (name_b (co));
+ process_path pp (search_b (co));
try
{
- // Use our executable directory as a fallback search since normally the
- // entire toolchain is installed into one directory. This way, for
- // example, if we installed into /opt/build2 and run bpkg with absolute
- // path (and without PATH), then bpkg will be able to find "its" b.
- //
- process_path pp (process::path_search (b, exec_dir));
-
small_vector<const char*, 1> ops;
- // Map verbosity level. If we are running quiet or at level 1,
- // then run build2 quiet. Otherwise, run it at the same level
- // as us.
+ // NOTE: see print_b() above if changing anything here.
//
- string vl;
- bool no_progress (co.no_progress ());
+ // NOTE: see custom versions in system_package_manager* if adding
+ // anything new here (search for search_b()).
- if (verb == 0)
- {
- ops.push_back ("-q");
- no_progress = false; // Already suppressed with -q.
- }
- else if (verb == 1)
- {
- if (v != verb_b::normal)
- {
- ops.push_back ("-q");
+ string verb_arg;
+ map_verb_b (co, v, ops, verb_arg);
- if (!no_progress)
- {
- if (v == verb_b::progress && stderr_term)
- ops.push_back ("--progress");
- }
- else
- no_progress = false; // Already suppressed with -q.
- }
- }
- else if (verb == 2)
- ops.push_back ("-v");
- else
- {
- vl = to_string (verb);
- ops.push_back ("--verbose");
- ops.push_back (vl.c_str ());
- }
+ // Forward our --[no]diag-color options.
+ //
+ if (co.diag_color ())
+ ops.push_back ("--diag-color");
- if (no_progress)
- ops.push_back ("--no-progress");
+ if (co.no_diag_color ())
+ ops.push_back ("--no-diag-color");
return process_start_callback (
[] (const char* const args[], size_t n)
@@ -83,7 +135,7 @@ namespace bpkg
}
catch (const process_error& e)
{
- fail << "unable to execute " << b << ": " << e << endf;
+ fail << "unable to execute " << pp.recall_string () << ": " << e << endf;
}
}
diff --git a/bpkg/wrapper-traits.hxx b/bpkg/wrapper-traits.hxx
index 0288a9d..668c171 100644
--- a/bpkg/wrapper-traits.hxx
+++ b/bpkg/wrapper-traits.hxx
@@ -4,7 +4,7 @@
#ifndef BPKG_WRAPPER_TRAITS_HXX
#define BPKG_WRAPPER_TRAITS_HXX
-#include <libbutl/optional.mxx>
+#include <libbutl/optional.hxx>
#include <odb/wrapper-traits.hxx>
diff --git a/build/root.build b/build/root.build
index 1dcb07b..4a3a866 100644
--- a/build/root.build
+++ b/build/root.build
@@ -14,9 +14,17 @@ if ($cxx.target.system == 'win32-msvc')
cxx.poptions += -D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS
if ($cxx.class == 'msvc')
- cxx.coptions += /wd4251 /wd4275 /wd4800 /wd4819
+ cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
- cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object # libbutl
+{
+ cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
+-Wno-stringop-overread # libbutl
+
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
cxx.poptions =+ "-I$out_root" "-I$src_root"
@@ -27,3 +35,7 @@ cxx.poptions =+ "-I$out_root" "-I$src_root"
# be there unless the module is configured).
#
using? cli
+
+# Disable build2 libraries importation using the built-in path.
+#
+import.build2 = [null]
diff --git a/doc/buildfile b/doc/buildfile
index 201d41c..d1592aa 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -3,6 +3,9 @@
cmds = \
bpkg-cfg-create \
+bpkg-cfg-info \
+bpkg-cfg-link \
+bpkg-cfg-unlink \
bpkg-help \
bpkg-pkg-build \
bpkg-pkg-checkout \
diff --git a/doc/cli.sh b/doc/cli.sh
index 995efcc..326d63c 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.14.0-a.0.z
+version=0.17.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -58,30 +58,40 @@ function compile ()
-v date="$date" \
-v copyright="$copyright" \
--include-base-last "${o[@]}" \
---generate-man --man-suffix .1 \
+--generate-man --man-suffix .1 --ascii-tree \
--man-prologue-file man-prologue.1 \
--man-epilogue-file man-epilogue.1 \
--link-regex '%b(#.+)?%$1%' \
--link-regex '%bpkg(#.+)?%$1%' \
+--link-regex '%#.+%%' \
../bpkg/$n.cli
}
-o="--output-prefix bpkg- --class-doc bpkg::common_options=short"
+# Need global --suppress-undocumented because of few undocumented options
+# in common.cli.
+#
+o="--suppress-undocumented --output-prefix bpkg- --class-doc bpkg::common_options=short"
# A few special cases.
#
compile "common" $o --output-suffix "-options" --class-doc bpkg::common_options=long
-compile "bpkg" $o --output-prefix "" --suppress-undocumented --class-doc bpkg::commands=short --class-doc bpkg::topics=short
+compile "bpkg" $o --output-prefix "" --class-doc bpkg::commands=short --class-doc bpkg::topics=short
+
+compile "pkg-build" $o --class-doc bpkg::pkg_build_pkg_options=exclude-base
-compile "pkg-build" $o --class-doc bpkg::pkg_build_pkg_options=exclude-base
+compile "pkg-bindist" $o \
+ --class-doc bpkg::pkg_bindist_common_options=exclude-base \
+ --class-doc bpkg::pkg_bindist_debian_options=exclude-base \
+ --class-doc bpkg::pkg_bindist_fedora_options=exclude-base \
+ --class-doc bpkg::pkg_bindist_archive_options=exclude-base
# NOTE: remember to update a similar list in buildfile and bpkg.cli as well as
# the help topics sections in bpkg/buildfile and help.cxx.
#
-pages="cfg-create help pkg-clean pkg-configure pkg-disfigure \
-pkg-drop pkg-fetch pkg-checkout pkg-install pkg-purge pkg-status pkg-test \
-pkg-uninstall pkg-unpack pkg-update pkg-verify rep-add rep-remove rep-list \
-rep-create rep-fetch rep-info repository-signing repository-types \
+pages="cfg-create cfg-info cfg-link cfg-unlink help pkg-clean pkg-configure \
+pkg-disfigure pkg-drop pkg-fetch pkg-checkout pkg-install pkg-purge pkg-status \
+pkg-test pkg-uninstall pkg-unpack pkg-update pkg-verify rep-add rep-remove \
+rep-list rep-create rep-fetch rep-info repository-signing repository-types \
argument-grouping default-options-files"
for p in $pages; do
@@ -90,6 +100,9 @@ done
# Manual.
#
+
+# @@ Note that we now have --ascii-tree CLI option.
+#
function xhtml_to_ps () # <from> <to> [<html2ps-options>]
{
local from="$1"
@@ -110,6 +123,7 @@ cli -I .. \
--html-epilogue-file doc-epilogue.xhtml \
--link-regex '%b([-.].+)%../../build2/doc/b$1%' \
--link-regex '%b(#.+)?%../../build2/doc/build2-build-system-manual.xhtml$1%' \
+--link-regex '%bbot(#.+)?%../../bbot/doc/build2-build-bot-manual.xhtml$1%' \
--output-prefix build2-package-manager- \
manual.cli
diff --git a/doc/manual.cli b/doc/manual.cli
index a225cbd..e7e61c0 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -246,10 +246,10 @@ operators, shortcut (to range) operators, or ranges and has the following
form:
\
-<version-constraint> := <comparison> | <shortcut> | <range>
-<comparison> := ('==' | '>' | '<' | '>=' | '<=') <version>
-<shortcut> := ('^' | '~') <version>
-<range> := ('(' | '[') <version> <version> (')' | ']')
+<version-constraint> = <comparison> | <shortcut> | <range>
+<comparison> = ('==' | '>' | '<' | '>=' | '<=') <version>
+<shortcut> = ('^' | '~') <version>
+<range> = ('(' | '[') <version> <version> (')' | ']')
\
The shortcut operators can only be used with \l{b#module-version standard
@@ -297,6 +297,436 @@ operators. While it is possible that the original manifest specified equality
or shortcuts as full ranges, it is acceptable to display/serialize them as
simpler operators.|
+Instead of a concrete value, the version in the constraint can be specified in
+terms of the dependent package's version (that is, the version of the package
+placing the constraint) using the special \c{$} value. For example:
+
+\
+libfoo == $
+\
+
+A constraint that contains \c{$} is called incomplete. This mechanism is
+primarily useful when developing related packages that should track each
+other's versions exactly or closely.
+
+In comparison operators and ranges the \c{$} value is replaced with the
+dependent version ignoring the revision. For shortcut operators, the dependent
+version must be a standard version and the following additional processing is
+applied depending on whether the version is a release, final pre-release, or a
+snapshot pre-release.
+
+\ol|
+
+\li|For a release we set the min version patch to zero. For \c{^} we also set
+the minor version to zero, unless the major version is zero (reduces to
+\c{~}). The max version is set according to the standard shortcut logic. For
+example, \c{~$} is completed as follows:
+
+\
+1.2.0 -> [1.2.0 1.3.0-)
+1.2.1 -> [1.2.0 1.3.0-)
+1.2.2 -> [1.2.0 1.3.0-)
+\
+
+And \c{^$} is completed as follows:
+
+\
+1.0.0 -> [1.0.0 2.0.0-)
+1.1.1 -> [1.0.0 2.0.0-)
+\
+
+|
+
+\li|For a final pre-release the key observation is that if the patch
+component for \c{~} or minor and patch components for \c{^} are not zero, then
+that means there has been a compatible release and we treat this case the same
+as a release, ignoring the pre-release part. If, however, it/they are zero, then
+that means there may yet be no final release and we have to start from the
+first alpha. For example, for the \c{~$} case:
+
+\
+1.2.0-a.1 -> [1.2.0-a.1 1.3.0-)
+1.2.0-b.2 -> [1.2.0-a.1 1.3.0-)
+1.2.1-a.1 -> [1.2.0 1.3.0-)
+1.2.2-b.2 -> [1.2.0 1.3.0-)
+\
+
+And for the \c{^$} case:
+
+\
+1.0.0-a.1 -> [1.0.0-a.1 2.0.0-)
+1.0.0-b.2 -> [1.0.0-a.1 2.0.0-)
+1.0.1-a.1 -> [1.0.0 2.0.0-)
+1.1.0-b.2 -> [1.0.0 2.0.0-)
+\
+
+|
+
+\li|For a snapshot pre-release we distinguish two cases: a patch snapshot
+(the patch component is not zero) and a major/minor snapshot (the patch
+component is zero). For the patch snapshot case we assume that it is (most
+likely) developed independently of the dependency and we treat it the same as
+the final pre-release case. For example, if the dependent version is
+\c{1.2.1-a.0.nnn}, the dependency could be \c{1.2.0} or \c{1.2.2} (or
+somewhere in-between).
+
+For the major/minor snapshot we assume that all the packages are developed in
+lockstep and have the same \c{X.Y.0} version. In this case we make the
+range start from the earliest possible version in this \"snapshot series\" and
+end before the final pre-release. For example (in this case \c{~} and \c{^}
+are treated the same):
+
+\
+1.2.0-a.0.nnn -> [1.2.0-a.0.1 1.2.0-a.1)
+2.0.0-b.2.nnn -> [2.0.0-b.2.1 2.0.0-b.3)
+\
+
+||
+
+
+\h1#package-skeleton|Package Build System Skeleton|
+
+There are situations where \c{bpkg} may need to evaluate \c{buildfile}
+expressions and fragments before committing to a particular version of the
+package and therefore before actually unpacking anything. For example,
+\c{bpkg} may need to evaluate a condition in the conditional dependency or it
+may need to negotiate a configuration among several dependents of a package
+which requires it to know this package's configuration variable types and
+default values.
+
+To solve this chicken and egg kind of problem, \c{bpkg} includes a minimal
+subset of the build system files along with the package's standard metadata
+(name, version, etc) into the repository metadata
+(\l{#manifest-package-list-pkg \c{packages.manifest}}). This subset is called
+the package build system skeleton, or just package skeleton for short, and
+includes the \c{build/bootstrap.build} and \c{build/root.build} files (or
+their alternative naming scheme variants) as well as any files that may be
+sourced by \c{root.build}.
+
+The inclusion of \c{build/bootstrap.build} and \c{build/root.build} (if
+present) as well as any \c{build/config/*.build} (or their alternative naming
+scheme variants) is automatic. However, if \c{root.build} sources any files
+other than \c{build/config/*.build}, then they must be specified explicitly in
+the package manifest using the \l{#manifest-package-build-file \c{build-file}}
+value.
+
+Inside these buildfiles the skeleton load can be distinguished from normal
+load by examining the \c{build.mode} variable, which is set to \c{skeleton}
+during the skeleton load. In particular, this variable must be used to omit
+loading of build system modules that are neither built-in nor standard
+pre-installed and which are therefore listed as package dependencies. Such
+modules are not yet available during the skeleton load. For example:
+
+\
+# root.build
+
+using cxx # Ok, built-in module.
+using autoconf # Ok, standard pre-installed module.
+
+if ($build.mode != 'skeleton')
+ using hello
+\
+
+The \c{build.mode} variable can also be used to omit parts of \c{root.build}
+that are expensive to evaluate and which are only necessary during the actual
+build. Here is a realistic example:
+
+\
+# root.build
+
+...
+
+using cxx
+
+# Determine the GCC plugin directory. But omit doing it during the
+# skeleton load.
+#
+if ($build.mode != 'skeleton')
+{
+ if ($cxx.id != 'gcc')
+ fail 'this project can only be built with GCC'
+
+ # If plugin support is disabled, then -print-file-name will print
+ # the name we have passed (the real plugin directory will always
+ # be absolute).
+ #
+ plugin_dir = [dir_path] \
+ $process.run($cxx.path -print-file-name=plugin)
+
+ if (\"$plugin_dir\" == plugin)
+ fail \"$recall($cxx.path) does not support plugins\"
+
+ plugin_dir = $normalize($plugin_dir)
+}
+\
+
+
+\h1#dep-config-negotiation|Dependency Configuration Negotiation|
+
+In \c{bpkg}, a dependent package may specify a desired configuration for a
+dependency package. Because there could be multiple such dependents, \c{bpkg}
+needs to come up with a dependency configuration that is acceptable to all of
+them. This process is called the dependency configuration negotiation.
+
+The desired dependency configuration is specified as part of the
+\l{#manifest-package-depends \c{depends}} manifest value and can be expressed
+as either a single \c{require} clause or as a pair of \c{prefer}/\c{accept}
+clauses.
+
+The \c{require} clause is essentially a shortcut for specifying the
+\c{prefer}/\c{accept} clauses where the \c{accept} condition simply verifies
+all the variable values assigned in the \c{prefer} clause. It is, however,
+further restricted to the common case of only setting \c{bool} variables and
+only to \c{true} to allow additional optimizations during the configuration
+negotiation. The remainder of this section only deals with the general
+\c{prefer}/\c{accept} semantics.
+
+While the exact format of \c{prefer}/\c{accept} is described as part of the
+\l{#manifest-package-depends \c{depends}} manifest value, for this section it
+is sufficient to know that the \c{prefer} clause is an arbitrary \c{buildfile}
+fragment that is expected to set one or more dependency configuration
+variables to the values preferred by this dependent while the \c{accept}
+clause is a \c{buildfile} eval context expression that should evaluate to
+\c{true} or \c{false} indicating whether the dependency configuration values
+it is evaluated on are acceptable to this dependent. For example:
+
+\
+libfoo ^1.0.0
+{
+ # We prefer the cache but can work without it.
+ # We need the buffer of at least 4KB.
+ #
+ prefer
+ {
+ config.libfoo.cache = true
+
+ config.libfoo.buffer = ($config.libfoo.buffer < 4096 \
+ ? 4096 \
+ : $config.libfoo.buffer)
+ }
+
+ accept ($config.libfoo.buffer >= 4096)
+}
+\
+
+The configuration negotiation algorithm can be summarized as cooperative
+refinement. Specifically, whenever a \c{prefer} clause of a dependent changes
+any configuration value, all other dependents' \c{prefer} clauses are
+re-evaluated. This process continues until there are no more changes
+(success), one of the \c{accept} clauses returned \c{false} (failure), or the
+process starts \"yo-yo'ing\" between two or more configurations (failure).
+
+The dependents are expected to cooperate by not overriding \"better\" values
+that were set by other dependents. Consider the following two \c{prefer}
+clauses:
+
+\
+prefer
+{
+ config.libfoo.buffer = 4096
+}
+
+prefer
+{
+ config.libfoo.buffer = ($config.libfoo.buffer < 4096 \
+ ? 4096 \
+ : $config.libfoo.buffer)
+}
+\
+
+The first version is non-cooperative and should only be used if this dependent
+requires the buffer to be exactly 4KB. The second version is cooperative: it
+will increase the buffer to the minimum required by this dependent but will
+respect values above 4KB.
+
+One case where we don't need to worry about this is when setting the
+configuration variable to the \"best\" possible value. One common example of
+this is setting a \c{bool} configuration to \c{true}.
+
+With a few exceptions discussed below, a dependent must always re-set the
+configuration variable, even if to the better value. For example, the
+following is an incorrect attempt at the above cooperative \c{prefer} clause:
+
+\
+prefer
+{
+ if ($config.libfoo.buffer < 4096) # Incorrect.
+ config.libfoo.buffer = 4096
+}
+\
+
+The problem with the above attempt is that the default value could be greater
+than 4KB, in which case \c{bpkg} will have no idea that there is a dependent
+relying on this configuration value.
+
+Before each \c{prefer} clause re-evaluation, variables that were first set to
+their current values by this dependent are reset to their defaults thus
+allowing the dependent to change its mind, for instance, in response to other
+configuration changes. For example:
+
+\
+# While we have no preference about the cache, if enabled/disabled,
+# we need a bigger/smaller buffer.
+#
+prefer
+{
+ min_buffer = ($config.libfoo.cache ? 8192 : 4096)
+
+ config.libfoo.buffer = ($config.libfoo.buffer < $min_buffer \
+ ? $min_buffer \
+ : $config.libfoo.buffer)
+}
+
+accept ($config.libfoo.buffer >= ($config.libfoo.cache ? 8192 : 4096))
+\
+
+The interesting case to consider in the above example is when
+\c{config.libfoo.cache} changes from \c{true} to \c{false}: without the reset
+to defaults semantics the \c{prefer} clause would have kept the buffer at 8KB
+(since it's greater than the 4KB minimum).
+
+\N|Currently \c{accept} is always evaluated after \c{prefer} and temporary
+variables (like \c{min_buffer} in the above example) set in \c{prefer} are
+visible in \c{accept}. But it's best not to rely on this in case it changes
+in the future. For example, we may try harder to resolve the \"yo-yo'ing\"
+case mentioned above by checking if one of the alternating configurations
+is acceptable to everyone without re-evaluation.
+
+This is also the reason why we need a separate \c{accept} in the first place.
+Plus, it allows for more advanced configuration techniques where we may need
+to have an acceptance criteria but no preferences.|
+
+Configuration variables that are set by the dependent in the \c{prefer} clause
+are visible in the subsequent clauses as well as in the subsequent \c{depends}
+values of this dependent. Configuration variables that are not set, however,
+are only visible until the immediately following \c{reflect} clause. For
+example, in the above listing, \c{config.libfoo.cache} would still be visible
+in the \c{reflect} clause if it were to follow \c{accept} but no further. As a
+result, if we need to make decisions based on configuration variables that we
+have no preference about, they need to be saved in the \c{reflect} clause. For
+example:
+
+\
+depends:
+\\
+libfoo ^1.0.0
+{
+ # We have no preference about the cache but need to
+ # observe its value.
+ #
+ prefer
+ {
+ }
+
+ accept (true)
+
+ reflect
+ {
+ config.hello.libfoo_cache = $config.libfoo.cache
+ }
+}
+\\
+
+depends: libbar ^1.0.0 ? ($config.hello.libfoo_cache)
+\
+
+It is possible to determine the origin of the configuration variable value
+using the \c{$config.origin()} function. It returns either \c{undefined} if
+the variable is undefined (only possible if it has no default value),
+\c{default} if the variable has the default value from the \c{config}
+directive in \c{root.build}, \c{buildfile} if the value is from a
+\c{buildfile}, normally \c{config.build}, or \c{override} if the value is a
+command line override (that is, user configuration). For example, this is how
+we could use it if we only wanted to change the default value (notice that
+it's the variable's name and not its \c{$}-expansion that we pass to
+\c{$config.origin()}):
+
+\
+prefer
+{
+ config.libfoo.buffer = ( \
+ $config.origin(config.libfoo.buffer) == 'default' \
+ ? 4096 \
+ : $config.libfoo.buffer)
+}
+\
+
+The following sub-sections discuss a number of more advanced configuration
+techniques that are based on the functionality described in this section.
+
+
+\h#dep-config-prefer-x-accept-xy|Prefer X but Accept X or Y|
+
+Consider a configuration variable that is a choice between several mutually
+exclusive values, for example, user interface backends that could be, say,
+\c{cli}, \c{gui}, or \c{none}. In such situations it's common to prefer one
+value but be able to work with some subset of them. For example, we could
+prefer \c{gui} but could also make do with \c{cli} but not with
+\c{none}. Here is how we could express such a configuration:
+
+\
+libfoo ^1.0.0
+{
+ # We prefer `gui`, can also work with `cli` but not `none`.
+ #
+ prefer
+ {
+ config.libfoo.ui = ( \
+ $config.origin(config.libfoo.ui) == 'default' || \
+ ($config.libfoo.ui != 'gui' && $config.libfoo.ui != 'cli') \
+ ? 'gui' \
+ : $config.libfoo.ui)
+ }
+
+ accept ($config.libfoo.ui == 'gui' || $config.libfoo.ui == 'cli')
+}
+\
+
+\h#dep-config-use-if-enabled|Use If Enabled|
+
+Sometimes we may want to use a feature if it is enabled by someone else but
+not enable it ourselves. For example, the feature might be expensive and our
+use of it tangential, but if it's enabled anyway, then we might as well take
+advantage of it. Here is how we could express such a configuration:
+
+\
+libfoo ^1.0.0
+{
+ # Use config.libfoo.x only if enabled by someone else.
+ #
+ prefer
+ {
+ }
+
+ accept (true)
+
+ reflect
+ {
+ config.hello.libfoo_x = $config.libfoo.x
+ }
+}
+\
+
+\h#dep-config-disable-default|Disable If Enabled by Default|
+
+Sometimes we may want to disable a feature that is enabled by default provided
+that nobody else needs it. For example, the feature might be expensive and we
+would prefer to avoid paying the cost if we are the only ones using this
+dependency. Here is how we could express such a configuration:
+
+\
+libfoo ^1.0.0
+{
+ prefer
+ {
+ if ($config.origin(config.libfoo.x) == 'default')
+ config.libfoo.x = false
+ }
+
+ accept (true)
+}
+\
+
\h1#manifests|Manifests|
@@ -375,7 +805,7 @@ version: 2.3.4
To disable treating of a newline as a name-value pair terminator we can escape
it with \c{\\}. Note that \c{\\} is only treated as an escape sequence when
followed by a newline and both are simply removed from the stream (as opposed
-to being replaced which a space). To enter a literal \c{\\} at the end of the
+to being replaced with a space). To enter a literal \c{\\} at the end of the
value, use the \c{\\\\} sequence. For example:
\
@@ -407,15 +837,16 @@ description: First line. Second line.
\
As a result, such a sequence is \"overloaded\" to provide more useful
-functionality in two ways: Firstly, if \c{:} after the name is immediately
-followed (ignoring whitespaces) by \c{\\} and a newline, then it signals the
-start of the multi-line mode. In this mode all subsequent newlines and \c{#}
-are treated as ordinary characters rather than value terminators or comments
-until a line consisting of just \\ and a newline (the multi-line mode
-terminator). For example:
+functionality in two ways: Firstly, if \c{:} after the name is followed on the
+next line by just \c{\\} and a newline, then it signals the start of the
+multi-line mode. In this mode all subsequent newlines and \c{#} are treated as
+ordinary characters rather than value terminators or comments until a line
+consisting of just \c{\\} and a newline (the multi-line mode terminator). For
+example:
\
-description:\
+description:
+\\
First paragraph.
#
Second paragraph.
@@ -429,35 +860,26 @@ Expressed as a C-string, the value in the above example is:
\
-\N|If we didn't expect to ever need to specify a name with an empty value,
-then an empty value could have turned on the multi-line mode, for example:
-
-\
-description:
-First paragraph.
-#
-Second paragraph.
-\\
-\
-
-There are two reasons we don't do this: we don't want to close the door on
-empty values and we want a more explicit \"introductor\" for the multi-line
-mode since it is quite different compared to the simple mode.|
+\N|Originally, the multi-line mode was entered if \c{:} after the name were
+immediately followed by \c{\\} and a newline but on the same line. While this
+syntax is still recognized for backwards compatibility, it is deprecated and
+will be discontinued in the future.|
Note that in the multi-line mode we can still use newline escaping to split
long lines, for example:
\
-description:\
+description:
+\\
First paragraph that doesn't fit into one line \
so it is continued on the next line.
Second paragraph.
\\
\
-In the simple (that is, non-multi-line) mode, the sole \c{\\} and newline
-sequence is overloaded to mean a newline. So the previous example can also be
-represented like this:
+And secondly, in the simple (that is, non-multi-line) mode, the sole \c{\\}
+and newline sequence is overloaded to mean a newline. So the previous example
+can also be represented like this:
\
description: First paragraph that doesn't fit into one \
@@ -470,7 +892,8 @@ Note that the multi-line mode can be used to capture a value with leading
and/or trailing whitespaces, for example:
\
-description:\
+description:
+\\
test
\\
@@ -487,7 +910,8 @@ values. For example the following representation results in the same value as
in the previous example.
\
-description:\
+description:
+\\
test
<EOF>
@@ -540,6 +964,37 @@ example:
url: http://git.example.com/?p=foo\;a=tree
\
+The only other recognized escape sequence in such values is \c{\\\\}, which is
+replaced with a single backslash. If a backslash is followed by any other
+character, then it is treated literally.
+
+If a value with a comment is multi-line, then \c{;} must appear on a separate
+line, for example:
+
+\
+url:
+\\
+http://git.example.com/?p=foo;a=tree
+;
+Git repository tree.
+\\
+\
+
+In this case, only lines that consist of a sole non-comment semicolon need
+escaping, for example:
+
+\
+license:
+\\
+other: strange
+\;
+license
+\\
+\
+
+The only other recognized escape sequence in such multi-line values is lines
+consisting of two or more backslashes followed by a semicolon.
+
In the manifest specifications described below optional components are
enclosed in square brackets (\c{[]}). If the name is enclosed in \c{[]} then
the name-value pair is optional, otherwise \- required. For example:
@@ -585,6 +1040,9 @@ a full package manifest they can be interleaved with non-header values.
\
name: <name>
version: <version>
+[upstream-version]: <string>
+[type]: <type>
+[language]: <lang>
[project]: <name>
[priority]: <priority> [; <comment>]
summary: <text>
@@ -597,8 +1055,12 @@ license: <licenses> [; <comment>]
[description]: <text>
[description-file]: <path> [; <comment>]
[description-type]: <text-type>
+[package-description]: <text>
+[package-description-file]: <path> [; <comment>]
+[package-description-type]: <text-type>
[changes]: <text>
[changes-file]: <path> [; <comment>]
+[changes-type]: <text-type>
[url]: <url> [; <comment>]
[doc-url]: <url> [; <comment>]
@@ -611,16 +1073,44 @@ license: <licenses> [; <comment>]
[build-warning-email]: <email> [; <comment>]
[build-error-email]: <email> [; <comment>]
-[depends]: [?][*] <alternatives> [; <comment>]
-[requires]: [?] [<alternatives>] [; <comment>]
+[depends]: [*] <alternatives> [; <comment>]
+[requires]: [*] <alternatives> [; <comment>]
-[tests]: <name> [<version-constraint>]
-[examples]: <name> [<version-constraint>]
-[benchmarks]: <name> [<version-constraint>]
+[tests]: [*] <name> [<version-constraint>]
+[examples]: [*] <name> [<version-constraint>]
+[benchmarks]: [*] <name> [<version-constraint>]
[builds]: <class-expr> [; <comment>]
[build-include]: <config>[/<target>] [; <comment>]
[build-exclude]: <config>[/<target>] [; <comment>]
+[build-auxiliary]: <config> [; <comment>]
+[build-auxiliary-<name>]: <config> [; <comment>]
+
+[*-build-config]: <args> [; <comment>]
+
+[*-builds]: <class-expr> [; <comment>]
+[*-build-include]: <config>[/<target>] [; <comment>]
+[*-build-exclude]: <config>[/<target>] [; <comment>]
+[*-build-auxiliary]: <config> [; <comment>]
+[*-build-auxiliary-<name>]: <config> [; <comment>]
+
+[*-build-email]: <email> [; <comment>]
+[*-build-warning-email]: <email> [; <comment>]
+[*-build-error-email]: <email> [; <comment>]
+
+[build-file]: <path>
+
+[bootstrap-build]: <text>
+[root-build]: <text>
+[*-build]: <text>
+
+[bootstrap-build2]: <text>
+[root-build2]: <text>
+[*-build2]: <text>
+
+[*-name]: <name> [<name>...]
+[*-version]: <string>
+[*-to-downstream-version]: <regex>
\
\h2#manifest-package-name|\c{name}|
@@ -653,6 +1143,53 @@ and use the \c{upstream-version} value to preserve the original version for
information.
+\h2#manifest-package-type-language|\c{type}, \c{language}|
+
+\
+[type]: <type>
+[language]: <lang>
+
+<type> = <name>[,<sub-options>]
+<lang> = <name>[=impl]
+\
+
+The package type and programming language(s).
+
+The currently recognized package types are \c{exe}, \c{lib}, and \c{other}. If
+the type is not specified, then if the package name starts with \c{lib}, then
+it is assumed to be \c{lib} and \c{exe} otherwise (see \l{#package-name
+Package Name} for details). Other package types may be added in the future and
+code that does not recognize a certain package type should treat it as
+\c{other}. The type name can be followed by a comma-separated list of
+sub-options. Currently, the only recognized sub-option is \c{binless} which
+applies to the \c{lib} type indicating a header-only (or equivalent) library.
+For example:
+
+\
+type: lib,binless
+\
+
+The package language must be in lower case, for example, \c{c}, \c{c++},
+\c{rust}, \c{bash}. If the language is not specified, then if the package name
+has an extension (as in, for example, \c{libbutl.bash}; see \l{#package-name
+Package Name} for details) the extension is assumed to name the package
+language. Otherwise, \c{cc} (unspecified \c{c}-common language) is assumed. If
+a package uses multiple languages, then multiple \c{language} values must be
+specified. The languages which are only used in a library's implementation (as
+opposed to also in its interface) should be marked as such. For example, for a
+C library with C++ implementation:
+
+\
+type: lib
+language: c
+language: c++=impl
+\
+
+\N|If the use of a language, such as C++, also always implies the use of
+another language, such as C, then such an implied language need not be
+explicitly specified.|
+
+
\h2#manifest-package-project|\c{project}|
\
@@ -860,23 +1397,30 @@ as well as words from its summary are already considered to be keywords and
need not be repeated in this value.
-\h2#manifest-package-description|\c{description}|
+\h2#manifest-package-description|\c{description}, \c{package-description}|
\
[description]: <text>
[description-file]: <path> [; <comment>]
[description-type]: <text-type>
+[package-description]: <text>
+[package-description-file]: <path> [; <comment>]
+[package-description-type]: <text-type>
\
-The detailed description of the package. It can be provided either inline as a
-text fragment or by referring to a file within a package (e.g., \c{README}),
-but not both.
+The detailed description of the project (\c{description}) and package
+(\c{package-description}). If the package description is not specified, it is
+assumed to be the same as the project description. It only makes sense to
+specify the \c{package-description} value if the project and package are
+maintained separately. A description can be provided either inline as a text
+fragment or by referring to a file within a package (for example, \c{README}),
+but not both. For \c{package-description-file} the recommended file name is
+\c{PACKAGE-README} or \c{README-PACKAGE}.
In the web interface (\c{brep}) the description is displayed according to its
type. Currently, pre-formatted plain text, \l{https://github.github.com/gfm
GitHub-Flavored Markdown}, and \l{https://spec.commonmark.org/current
-CommonMark} are supported with the following \c{description-type} values,
-respectively:
+CommonMark} are supported with the following \c{*-type} values, respectively:
\
text/plain
@@ -887,13 +1431,13 @@ text/markdown;variant=CommonMark
If just \c{text/markdown} is specified, then the GitHub-Flavored Markdown
(which is a superset of CommonMark) is assumed.
-If the description type is not explicitly specified and the description is
-specified as \c{description-file}, then an attempt to derive the type from the
-file extension is made. Specifically, the \cb{.md} and \cb{.markdown}
-extensions are mapped to \c{text/markdown}, the \cb{.txt} and no extension are
-mapped to \c{text/plain}, and all other extensions are treated as an unknown
-type, similar to unknown \c{description-type} values. And if the description
-is not specified as a file, \c{text/plain} is assumed.
+If a description type is not explicitly specified and the description is
+specified as \c{*-file}, then an attempt to derive the type from the file
+extension is made. Specifically, the \cb{.md} and \cb{.markdown} extensions
+are mapped to \c{text/markdown}, the \cb{.txt} and no extension are mapped to
+\c{text/plain}, and all other extensions are treated as an unknown type,
+similar to unknown \c{*-type} values. And if a description is not specified as
+a file, \c{text/plain} is assumed.
\h2#manifest-package-changes|\c{changes}|
@@ -901,6 +1445,7 @@ is not specified as a file, \c{text/plain} is assumed.
\
[changes]: <text>
[changes-file]: <path> [; <comment>]
+[changes-type]: <text-type>
\
The description of changes in the release.
@@ -922,7 +1467,8 @@ changes-file: NEWS
Or:
\
-changes:\
+changes:
+\\
1.2.3-2
- applied upstream patch for critical bug bar
- regenerated documentation
@@ -933,8 +1479,11 @@ changes:\
changes-file: NEWS
\
-In the web interface (\c{brep}) the changes are displayed as pre-formatted
-plain text, similar to the package description.
+In the web interface (\c{brep}) the changes are displayed according to their
+type, similar to the package description (see the
+\l{#manifest-package-description \c{description}} value for details). If
+the changes type is not explicitly specified, then the types deduced for
+individual \c{changes} values must all be the same.
\h2#manifest-package-url|\c{url}|
@@ -1034,178 +1583,524 @@ build error notifications are sent to this email.
\h2#manifest-package-depends|\c{depends}|
\
-[depends]: [?][*] <alternatives> [; <comment>]
+[depends]: [*] <alternatives> [; <comment>]
+\
+
+Single-line form:
-<alternatives> = <dependency> [ '|' <dependency>]*
+\
+<alternatives> = <alternative> [ '|' <alternative>]*
+<alternative> = <dependencies> ['?' <enable-cond>] [<reflect-var>]
+<dependencies> = <dependency> | \
+ '{' <dependency> [<dependency>]* '}' [<version-constraint>]
<dependency> = <name> [<version-constraint>]
+<enable-cond> = '(' <buildfile-eval-expr> ')'
+<reflect-var> = <config-var> '=' <value>
\
-The prerequisite packages. If the \c{depends} value starts with \c{*}, then
-it is a \i{build-time} prerequisite. Otherwise it is \i{run-time}.
+Multi-line form:
+
+\
+<alternatives> =
+ <alternative>[
+ '|'
+ <alternative>]*
-\N|Most of the build-time prerequisites are expected to be tools such as code
-generator, so you can think of \c{*} as the executable mark printed by
+<alternative> =
+ <dependencies>
+ '{'
+ [
+ 'enable' <enable-cond>
+ ]
+
+ [
+ 'require'
+ '{'
+ <buildfile-fragment>
+ '}'
+
+ ] | [
+
+ 'prefer'
+ '{'
+ <buildfile-fragment>
+ '}'
+
+ 'accept' <accept-cond>
+ ]
+
+ [
+ 'reflect'
+ '{'
+ <buildfile-fragment>
+ '}'
+ ]
+ '}'
+
+<accept-cond> = '(' <buildfile-eval-expr> ')'
+\
+
+The dependency packages. The most common form of a dependency is a package
+name followed by the optional version constraint. For example:
+
+\
+depends: libhello ^1.0.0
+\
+
+See \l{#package-version-constraint Package Version Constraint} for the format
+and semantics of the version constraint. Instead of a concrete value, the
+version in the constraint can also be specified in terms of the dependent
+package's version (that is, its \l{#manifest-package-version \c{version}}
+value) using the special \c{$} value. This mechanism is primarily useful when
+developing related packages that should track each other's versions exactly or
+closely. For example:
+
+\
+name: sqlite3
+version: 3.18.2
+depends: libsqlite3 == $
+\
+
+If multiple packages are specified within a single \c{depends} value, they
+must be grouped with \c{{\}}. This can be useful if the packages share a
+version constraint. The group constraint applies to all the packages in
+the group that do not have their own constraint. For example:
+
+\
+depends: { libboost-any libboost-log libboost-uuid ~1.77.1 } ~1.77.0
+\
+
+If the \c{depends} value starts with \c{*}, then it is a \i{build-time}
+dependency. Otherwise it is \i{run-time}. For example:
+
+\
+depends: * byacc >= 20210619
+\
+
+\N|Most of the build-time dependencies are expected to be tools such as code
+generators, so you can think of \c{*} as the executable mark printed by
\c{ls}. An important difference between the two kinds of dependencies is that
in case of cross-compilation a build-time dependency must be built for the
-build machine, not the target.|
+host machine, not the target. Build system modules are also build-time
+dependencies.|
Two special build-time dependency names are recognized and checked in an ad
hoc manner: \c{build2} (the \c{build2} build system) and \c{bpkg} (the
-\c{build2} package manager). This allows us to specify the required build
-system and package manager versions, for example:
+\c{build2} package manager). This allows us to specify the minimum required
+build system and package manager versions, for example:
\
-depends: * build2 >= 0.6.0
-depends: * bpkg >= 0.6.0
+depends: * build2 >= 0.15.0
+depends: * bpkg >= 0.15.0
\
-Each \c{depends} value can specify multiple packages with the \i{OR}
-semantics. While multiple \c{depends} values are used to specify multiple
-packages with the \i{AND} semantics. A value that starts with \c{?} is a
-conditional prerequisite. Whether such a prerequisite will be in effect can
-only be determined at the package configuration time. It is recommended that
-you provide a comment for each conditional prerequisite as an aid to the user.
-For example:
+\N|If you are developing or packaging a project that uses features from the
+not yet released (staged) version of the \c{build2} toolchain, then you can
+use the pre-release version in the constraint. For example:
\
-depends: libz
-depends: libfoo ~1.2.0 ; Only works with libfoo 1.2.*.
-depends: libgnutls >= 1.2.3 | libopenssl >= 2.3.4
-depends: ? libboost-regex >= 1.52.0 ; Only if no C++11 <regex>.
-depends: ? libqtcore >= 5.0.0 ; Only if GUI is enabled.
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
\
-It is recommended that you specify unconditional dependencies first with
-simple (no alternatives) dependencies leading each set.
+|
-See \l{#package-version-constraint Package Version Constraint} for the format
-and semantics of the optional version constraint. Instead of a concrete
-value, it can also be specified in terms of the dependent package's version
-(that is, its \l{#manifest-package-version \c{version}} value) using the
-special \c{$} value. A \c{depends} value that contains \c{$} is called
-incomplete. This mechanism is primarily useful when developing related
-packages that should track each other's versions exactly or closely. For
-example:
+A dependency can be conditional, that is, it is only enabled if a certain
+condition is met. For example:
\
-name: sqlite3
-version: 3.18.2
-depends: libsqlite3 == $
+depends: libposix-getopt ^1.0.0 ? ($cxx.target.class == 'windows')
\
-In comparison operators and ranges the \c{$} value is replaced with the
-dependent version ignoring the revision. For shortcut operators, the dependent
-version must be a standard version and the following additional processing is
-applied depending on whether the version is a release, final pre-release, or a
-snapshot pre-release.
+The condition after \c{?} inside \c{()} is a \c{buildfile} eval context
+expression that should evaluate to \c{true} or \c{false}, as if it were
+specified in the \c{buildfile} \c{if} directive (see \l{b#intro-lang-expand
+Expansion and Quoting} and \l{b#intro-if-else Conditions (\c{if-else})} for
+details).
-\ol|
+The condition expression is evaluated after loading the package build system
+skeleton, that is, after loading its \c{root.build} (see \l{#package-skeleton
+Package Build System Skeleton} for details). As a result, variable values set
+by build system modules that are loaded in \c{root.build} as well as the
+package's configuration (including previously reflected; see below) or
+computed values can be referenced in dependency conditions. For example, given
+the following \c{root.build}:
-\li|For a release we set the min version patch to zero. For \c{^} we also set
-the minor version to zero, unless the major version is zero (reduces to
-\c{~}). The max version is set according to the standard shortcut logic. For
-example, \c{~$} is completed as follows:
+\
+# root.build
+...
+
+using cxx
+
+# MinGW ships POSIX <getopt.h>.
+#
+need_getopt = ($cxx.target.class == 'windows' && \
+ $cxx.target.system != 'mingw32')
+
+config [bool] config.hello.regex ?= false
\
-1.2.0 -> [1.2.0 1.3.0-)
-1.2.1 -> [1.2.0 1.3.0-)
-1.2.2 -> [1.2.0 1.3.0-)
+
+We could have the following conditional dependencies:
+
+\
+depends: libposix-getopt ^1.0.0 ? ($need_getopt) ; Windows && !MinGW.
+depends: libposix-regex ^1.0.0 ? ($config.hello.regex && \
+ $cxx.target.class == 'windows')
\
-And \c{^$} is completed as follows:
+The first \c{depends} value in the above example also shows the use of an
+optional comment. It's a good idea to provide it if the condition is not
+sufficiently self-explanatory.
+
+A dependency can \"reflect\" configuration variables to the subsequent
+\c{depends} values and to the package configuration. This can be used to
+signal whether a conditional dependency is enabled or which dependency
+alternative was selected (see below). The single-line form of \c{depends} can
+only reflect one configuration variable. For example:
\
-1.0.0 -> [1.0.0 2.0.0-)
-1.1.1 -> [1.0.0 2.0.0-)
+depends: libposix-regex ^1.0.0 \
+ ? ($cxx.target.class == 'windows') \
+ config.hello.external_regex=true
+\
+
+\
+# root.build
+
+...
+
+using cxx
+
+config [bool] config.hello.external_regex ?= false
+\
+
+\
+# buildfile
+
+libs =
+
+if $config.hello.external_regex
+ import libs += libposix-regex%lib{posix-regex}
+
+exe{hello}: ... $libs
+\
+
+In the above example, if the \c{hello} package is built for Windows, then the
+dependency on \c{libposix-regex} will be enabled and the package will be
+configured with \c{config.hello.external_regex=true}. This is used in the
+\c{buildfile} to decide whether to import \c{libposix-regex}. While in this
+example it would have probably been easier to just duplicate the check for
+Windows in the \c{buildfile} (or, better yet, factor this check to
+\c{root.build} and share the result via a computed variable between
+\c{manifest} and \c{buildfile}), the reflect mechanism is the only way to
+communicate the selected dependency alternative (discussed next).
+
+\N|An attempt to set a reflected configuration variable that is overridden by
+the user is an error. In a sense, configuration variables that are used to
+reflect information should be treated as the package's implementation details
+if the package management is involved. If, however, the package is configured
+without \c{bpkg}'s involvement, then these variables could reasonably be
+provided as user configuration.
+
+If you feel the need to allow a reflected configuration variable to also
+potentially be supplied as user configuration, then it's probably a good sign
+that you should turn things around: make the variable only user-configurable
+and use the enable condition instead of reflect. Alternatively, you could try
+to recognize and handle user overrides with the help of the
+\c{$config.origin()} function discussed in \l{#dep-config-negotiation
+Dependency Configuration Negotiation}.|
+
+While multiple \c{depends} values are used to specify multiple packages with
+the \i{AND} semantics, inside \c{depends} we can specify multiple packages (or
+groups of packages) with the \i{OR} semantics, called dependency
+alternatives. For example:
+
+\
+depends: libmysqlclient >= 5.0.3 | libmariadb ^10.2.2
+\
+
+When selecting an alternative, \c{bpkg} only considers packages that are
+either already present in the build configuration or are selected as
+dependencies by other packages, picking the first alternative with a
+satisfactory version constraint and an acceptable configuration. As a result,
+the order of alternatives expresses a preference. If, however, this does not
+yield a suitable alternative, then \c{bpkg} fails asking the user to make the
+selection.
+
+For example, if the package with the above dependency is called \c{libhello}
+and we build it in a configuration that already has both \c{libmysqlclient}
+and \c{libmariadb}, then \c{bpkg} will select \c{libmysqlclient}, provided the
+existing version satisfies the version constraint. If, however, there are no
+existing packages in the build configuration and we attempt to build just
+\c{libhello}, then \c{bpkg} will fail asking the user to pick one of the
+alternatives. If we wanted to make \c{bpkg} select \c{libmariadb} we could
+run:
+
+\
+$ bpkg build libhello ?libmariadb
+\
+
+\N|While \c{bpkg}'s refusal to automatically pick an alternative that would
+require building a new package may at first seem unfriendly to the user,
+practical experience shows that such extra user-friendliness would rarely
+justify the potential confusion that it may cause.
+
+Also note that it's not only the user that can pick a certain alternative but
+also a dependent package. Continuing with the above example, if we had
+\c{hello} that depended on \c{libhello} but only supported MariaDB (or
+provided a configuration variable to explicitly select the database), then we
+could have the following in its \c{manifest}:
+
+\
+depends: libmariadb ; Select MariaDB in libhello.
+depends: libhello ^1.0.0
\
|
-\li|For a final pre-release the key observation is that if the patch
-component for \c{~} or minor and patch components for \c{^} are not zero, then
-that means there has been a compatible release and we treat this case the same
-as release, ignoring the pre-release part. If, however, it/they are zero, then
-that means there may yet be no final release and we have to start from the
-first alpha. For example, for the \c{~$} case:
+Dependency alternatives can be combined with all the other features discussed
+above: groups, conditional dependencies, and reflect. As mentioned earlier,
+reflect is the only way to communicate the selection to subsequent \c{depends}
+values and the package configuration. For example:
\
-1.2.0-a.1 -> [1.2.0-a.1 1.3.0-)
-1.2.0-b.2 -> [1.2.0-a.1 1.3.0-)
-1.2.1-a.1 -> [1.2.0 1.3.0-)
-1.2.2-b.2 -> [1.2.0 1.3.0-)
+depends: libmysqlclient >= 5.0.3 config.hello.db='mysql' | \
+ libmariadb ^10.2.2 ? ($cxx.target.class != 'windows') \
+ config.hello.db='mariadb'
+
+depends: libz ^1.2.1100 ? ($config.hello.db == 'mysql')
\
-And for the \c{^$} case:
+If an alternative is conditional and the condition evaluates to \c{false},
+then this alternative is not considered. If all but one alternative are
+disabled due to conditions, then this becomes an ordinary dependency. If all
+the alternatives are disabled due to conditions, then the entire dependency
+is disabled. For example:
\
-1.0.0-a.1 -> [1.0.0-a.1 2.0.0-)
-1.0.0-b.2 -> [1.0.0-a.1 2.0.0-)
-1.0.1-a.1 -> [1.0.0 2.0.0-)
-1.1.0-b.2 -> [1.0.0 2.0.0-)
+depends: libmysqlclient >= 5.0.3 ? ($config.hello.db == 'mysql') | \
+ libmariadb ^10.2.2 ? ($config.hello.db == 'mariadb')
\
+While there is no need to use the dependency alternatives in the above example
+(since the alternatives are mutually exclusive), it makes for good
+documentation of intent.
+
+Besides as a single line, the \c{depends} value can also be specified in a
+multi-line form which, besides potentially better readability, provides
+additional functionality. In the multi-line form, each dependency alternative
+occupies a separate line and \c{|} can be specified either at the end of
+the dependency alternative line or on a separate line. For example:
+
+\
+depends:
+\\
+libmysqlclient >= 5.0.3 ? ($config.hello.db == 'mysql') |
+libmariadb ^10.2.2 ? ($config.hello.db == 'mariadb')
+\\
+\
+
+A dependency alternative can be optionally followed by a block containing a
+number of clauses. The \c{enable} clause is the alternative way to specify the
+condition for a conditional dependency while the \c{reflect} clause is the
+alternative way to specify the reflected configuration variable. The block may
+also contain \c{#}-style comments, similar to \c{buildfile}. For example:
+
+\
+depends:
+\\
+libmysqlclient >= 5.0.3
+{
+ reflect
+ {
+ config.hello.db = 'mysql'
+ }
+}
|
+libmariadb ^10.2.2
+{
+ # TODO: MariaDB support on Windows.
+ #
+ enable ($cxx.target.class != 'windows')
+
+ reflect
+ {
+ config.hello.db = 'mariadb'
+ }
+}
+\\
+\
-\li|For a snapshot pre-release we distinguish two cases: a patch snapshot
-(the patch component is not zero) and a major/minor snapshot (the patch
-component is zero). For the patch snapshot case we assume that it is (most
-likely) developed independently of the dependency and we treat it the same as
-the final pre-release case. For example, if the dependent version is
-\c{1.2.1-a.0.nnn}, the dependency could be \c{1.2.0} or \c{1.2.2} (or
-somewhere in-between).
+While the \c{enable} clause is essentially the same as its inline \c{?}
+variant, the \c{reflect} clause is an arbitrary \c{buildfile} fragment that
+can have more complex logic and assign multiple configuration variables. For
+example:
-For the major/minor snapshot we assume that all the packages are developed in
-the lockstep and have the same \c{X.Y.0} version. In this case we make the
-range start from the earliest possible version in this \"snapshot series\" and
-end before the final pre-release. For example (in this case \c{~} and \c{^}
-are treated the same):
+\
+libmariadb ^10.2.2
+{
+ reflect
+ {
+ if ($cxx.target.class == 'windows')
+ config.hello.db = 'mariadb-windows'
+ else
+ config.hello.db = 'mariadb-posix'
+ }
+}
+\
+
+The multi-line form also allows us to express our preferences and requirements
+for the dependency configuration. If all we need is to set one or more
+\c{bool} configuration variables to \c{true} (which usually translates to
+enabling one or more features), then we can use the \c{require} clause. For
+example:
\
-1.2.0-a.0.nnn -> [1.2.0-a.0.1 1.2.0-a.1)
-2.0.0-b.2.nnn -> [2.0.0-b.2.1 2.0.0-b.3)
+libmariadb ^10.2.2
+{
+ require
+ {
+ config.libmariadb.cache = true
+
+ if ($cxx.target.class != 'windows')
+ config.libmariadb.tls = true
+ }
+}
\
-||
+For more complex dependency configurations instead of \c{require} we can use
+the \c{prefer} and \c{accept} clauses. The \c{prefer} clause can set
+configuration variables of any type and to any value in order to express the
+package's preferred configuration while the \c{accept} condition evaluates
+whether any given configuration is acceptable. If used instead of \c{require},
+both \c{prefer} and \c{accept} must be present. For example:
+
+\
+libmariadb ^10.2.2
+{
+ # We prefer the cache but can work without it.
+ # We need the buffer of at least 4KB.
+ #
+ prefer
+ {
+ config.libmariadb.cache = true
+
+ config.libmariadb.buffer = ($config.libmariadb.buffer < 4096 \
+ ? 4096 \
+ : $config.libmariadb.buffer)
+ }
+
+ accept ($config.libmariadb.buffer >= 4096)
+}
+\
+
+\N|The \c{require} clause is essentially a shortcut for specifying the
+\c{prefer}/\c{accept} clauses where the \c{accept} condition simply verifies
+all the variable values assigned in the \c{prefer} clause. It is, however,
+further restricted to the common case of only setting \c{bool} variables and
+only to \c{true} to allow additional optimizations during the configuration
+negotiation.|
+
+The \c{require} and \c{prefer} clauses are arbitrary \c{buildfile} fragments
+similar to \c{reflect} while the \c{accept} clause is a \c{buildfile} eval
+context expression that should evaluate to \c{true} or \c{false}, similar to
+\c{enable}.
+
+Given the \c{require} and \c{prefer}/\c{accept} clauses of all the dependents
+of a particular dependency, \c{bpkg} tries to negotiate a configuration
+acceptable to all of them as described in \l{#dep-config-negotiation
+Dependency Configuration Negotiation}.
+
+All the clauses are evaluated in the specified order, that is, \c{enable},
+then \c{require} or \c{prefer}/\c{accept}, and finally \c{reflect}, with the
+(negotiated, in case of \c{prefer}) configuration values set by preceding
+clauses available for examination by the subsequent clauses in this
+\c{depends} value as well as in all the subsequent ones. For example:
+
+\
+depends:
+\\
+libmariadb ^10.2.2
+{
+ prefer
+ {
+ config.libmariadb.cache = true
+
+ config.libmariadb.buffer = ($config.libmariadb.buffer < 4096 \
+ ? 4096 \
+ : $config.libmariadb.buffer)
+ }
+
+ accept ($config.libmariadb.buffer >= 4096)
+
+ reflect
+ {
+ config.hello.buffer = $config.libmariadb.buffer
+ }
+}
+\\
+
+depends: liblru ^1.0.0 ? ($config.libmariadb.cache)
+\
+
+The above example also highlights the difference between the
+\c{require}/\c{prefer} and \c{reflect} clauses that is easy to mix up: in
+\c{require}/\c{prefer} we set the dependency's while in \c{reflect} we set the
+dependent's configuration variables.
\h2#manifest-package-requires|\c{requires}|
\
-[requires]: [?] [<alternatives>] [; <comment>]
+[requires]: [*] <alternatives> [; <comment>]
-<alternatives> = <requirement> [ '|' <requirement>]*
-<requirement> = <id> | <dependency>
+<alternatives> = <alternative> [ '|' <alternative>]*
+<alternative> = <requirements> ['?' [<enable-cond>]] [<reflect-var>]
+<requirements> = [<requirement>] | \
+ '{' <requirement> [<requirement>]* '}' [<version-constraint>]
+<requirement> = <name> [<version-constraint>]
+<enable-cond> = '(' <buildfile-eval-expr> ')'
+<reflect-var> = <config-var> '=' <value>
\
-The package requirements (other than other packages). Such requirements are
-normally checked during package configuration by the build system and the only
-purpose of capturing them in the manifest is for documentation. Similar to
-\c{depends}, a value that starts with \c{?} is a conditional
-requirement. For example:
+The package requirements other than other packages. Such requirements are
+normally checked in an ad hoc way during package configuration by its
+\c{buildfiles} and the primary purpose of capturing them in the manifest is
+for documentation. However, there are some special requirements that are
+recognized by the tooling (see below). For example:
\
-requires: linux | windows | macosx
requires: c++11
-requires: ? ; VC 15 or later if targeting Windows.
-requires: ? ; libc++ if using Clang on Mac OS.
+requires: linux | windows | macos
+requires: libc++ ? ($macos) ; libc++ if using Clang on Mac OS.
\
-Notice that in the last two cases the id is omitted altogether with only the
-comment specifying the requirement.
-
-Note that \c{requires} should also be used to specify dependencies on external
-libraries, that is, the ones not packaged or not in the repository. In this
-case it may make sense to also specify the version constraint. For example:
+The format of the \c{requires} value is similar to
+\l{#manifest-package-depends \c{depends}} with the following differences. The
+requirement name (with or without version constraint) can mean anything (but
+must still be a valid package name). Only the \c{enable} and \c{reflect}
+clauses are permitted. There is a simplified syntax with either the
+requirement or enable condition or both being empty and where the comment
+carries all the information (and is thus mandatory). For example:
\
-requires: zlib >= 1.2.0 ; Most systems already have it or get from zlib.net.
+requires: ; X11 libs.
+requires: ? ($windows) ; Only 64-bit.
+requires: ? ; Only 64-bit if on Windows.
+requires: x86_64 ? ; Only if on Windows.
\
-It is recommended that you specify unconditional requirements first with
-simple (no alternatives) requirements leading each set.
+Note that \c{requires} can also be used to specify dependencies on system
+libraries, that is, the ones not to be packaged. In this case it may make
+sense to also specify the version constraint. For example:
+
+\
+requires: libx11 >= 1.7.2
+\
-To assist automated processing, the following pre-defined ids should be used
-for the common requirements:
+To assist potential future automated processing, the following pre-defined
+requirement names should be used for the common requirements:
\
c++98
@@ -1222,35 +2117,61 @@ posix
linux
macos
freebsd
+openbsd
+netbsd
windows
\
\
gcc[_X.Y.Z] ; For example: gcc_6, gcc_4.9, gcc_5.0.0
clang[_X.Y] ; For example: clang_6, clang_3.4, clang_3.4.1
-msvc[_NU] ; For example: msvc_14, msvc_15u3
+msvc[_N.U] ; For example: msvc_14, msvc_15.3
\
+The following pre-defined requirement names are recognized by automated build
+bots:
+
+\
+bootstrap
+host
+\
+
+The \c{bootstrap} value should be used to mark build system modules that
+require bootstrapping. The \c{host} value should be used to mark packages,
+such as source code generators, that are normally specified as build-time
+dependencies by other packages and therefore should be built in a host
+configuration. See the \l{bbot \c{bbot} documentation} for details.
+
-\h2#manifest-package-tests-examples-benchmarks|\c{tests, examples, benchmarks}|
+\h2#manifest-package-tests-examples-benchmarks|\c{tests}, \c{examples}, \c{benchmarks}|
\
-[tests]: <name> [<version-constraint>]
-[examples]: <name> [<version-constraint>]
-[benchmarks]: <name> [<version-constraint>]
+[tests]: [*] <package> ['?' <enable-cond>] [<reflect-var>]
+[examples]: [*] <package> ['?' <enable-cond>] [<reflect-var>]
+[benchmarks]: [*] <package> ['?' <enable-cond>] [<reflect-var>]
+
+<package> = <name> [<version-constraint>]
+<enable-cond> = '(' <buildfile-eval-expr> ')'
+<reflect-var> = <config-var> '=' <value>
\
-Separate tests, examples, and benchmarks packages. These packages are built
-and tested by automated build bots together with the primary package (see the
-\c{bbot} documentation for details). This, in particular, implies that these
-packages must be available from the primary package's repository or its
-complement repositories, recursively. The recommended naming convention for
-these packages is the primary package name followed by \c{-tests},
-\c{-examples}, or \c{-benchmarks}, respectively. For example:
+Separate tests, examples, and benchmarks packages. If the value starts with
+\c{*}, then the primary package is a \i{build-time} dependency for the
+specified package. Otherwise it is \i{run-time}. See the
+\l{#manifest-package-depends \c{depends}} value for details on \i{build-time}
+dependencies.
+
+These packages are built and tested by automated build bots together with the
+primary package (see the \l{bbot \c{bbot} documentation} for details). This,
+in particular, implies that these packages must be available from the primary
+package's repository or its complement repositories, recursively. The
+recommended naming convention for these packages is the primary package name
+followed by \c{-tests}, \c{-examples}, or \c{-benchmarks}, respectively. For
+example:
\
name: hello
-tests : hello-tests
+tests: hello-tests
examples: hello-examples
\
@@ -1263,18 +2184,57 @@ it can also be specified in terms of the primary package's version (see the
tests: hello-tests ~$
\
-Note that normally the tests, etc., packages themselves do not have an
-explicit dependency on the primary package (in a sense, the primary package
-has a special dependency on them). They are also not built by automated build
-bots separately from their primary package but may have their own build
-constraints, for example, to be excluded from building on some platforms where
-the primary package is still built, for example:
+Note that normally the tests, etc., packages themselves (we'll call them all
+test packages for short) do not have an explicit dependency on the primary
+package (in a sense, the primary package has a special test dependency on
+them). They are also not built by automated build bots separately from their
+primary package but may have their own build constraints, for example, to be
+excluded from building on some platforms where the primary package is still
+built, for example:
\
name: hello-tests
builds: -windows
\
+Also note that a test package may potentially be used as a test dependency for
+multiple primary packages. In this case a primary package normally needs to
+reflect to the test package the fact that it is the one being tested. This can
+be achieved by setting the test package's configuration variable (see the
+\l{#manifest-package-depends \c{depends}} value for details on
+reflection). For example:
+
+\
+name: hello-foo
+tests: hello-tests config.hello_tests.test=hello-foo
+
+name: hello-bar
+tests: hello-tests config.hello_tests.test=hello-bar
+\
+
+If it is plausible that the test package may also be built explicitly, for
+example, to achieve a more complicated setup (test multiple primary packages
+simultaneously, etc.), then the test dependencies need to be made conditional
+in the primary packages so that the explicit configuration is preferred over
+the reflections (see the \l{#manifest-package-depends \c{depends}} value for
+details on conditional dependencies). For example:
+
+\
+name: hello-foo
+tests: hello-tests \
+? (!$defined(config.hello_tests.test)) config.hello_tests.test=hello-foo
+
+name: hello-bar
+tests: hello-tests \
+? (!$defined(config.hello_tests.test)) config.hello_tests.test=hello-bar
+\
+
+Note that in contrast to the \l{#manifest-package-depends \c{depends}} value,
+both the reflection and condition refer to the variables defined not by the
+package which specifies the test dependency (primary package), but the package
+such a dependency refers to (test package).
+
+
\h2#manifest-package-builds|\c{builds}|
\
@@ -1285,52 +2245,60 @@ builds: -windows
<class-term> = ('+'|'-'|'&')['!'](<class-name> | '(' <class-expr> ')')
\
-The package build configurations. They specify the build configuration classes
-the package should or should not be built for by automated build bots. For
-example:
+The common package build target configurations. They specify the target
+configuration classes the package should or should not be built for by
+automated build bots, unless overridden by a package configuration-specific
+value (see \l{#manifest-package-build-config \c{*-build-config}} for details).
+For example:
\
builds: -windows
\
-Build configurations can belong to multiple classes with their names and
-semantics varying between different build bot deployments. However, the
-pre-defined \c{none}, \c{default}, and \c{all} classes are always provided. If
-no \c{builds} value is specified in the package manifest, then the \c{default}
-class is assumed.
+Build target configurations can belong to multiple classes with their names
+and semantics varying between different build bot deployments. However, the
+pre-defined \c{none}, \c{default}, \c{all}, \c{host}, and \c{build2} classes
+are always provided. If no \c{builds} value is specified in the package
+manifest, then the \c{default} class is assumed.
-\N|A build configuration class can also derive from another class in which
+\N|A target configuration class can also derive from another class in which
case configurations that belong to the derived class are treated as also
belonging to the base class (or classes, recursively). See the Build
Configurations page of the build bot deployment for the list of available
-build configurations and their classes.|
+target configurations and their classes.|
The \c{builds} value consists of an optional underlying class set
(\c{<class-uset>}) followed by a class set expression (\c{<class-expr>}). The
underlying set is a space-separated list of class names that define the set of
-build configurations to consider. If not specified, then all the
+build target configurations to consider. If not specified, then all the
configurations belonging to the \c{default} class are assumed. The class set
expression can then be used to exclude certain configurations from this
initial set.
The class expression is a space-separated list of terms that are evaluated
from left to right. The first character of each term determines whether the
-build configuration that belong to its set are added to (\c{+}), subtracted
-from (\c{-}), or intersected with (\c{&}) the current set. If the second
-character in the term is \c{!}, then its set of configuration is inverted
-against the underlying set. The term itself can be either the class name or a
-parenthesized expression. Some examples:
-
-\
-builds: none ; None.
-builds: all ; All.
-builds: default legacy ; Default and legacy.
-builds: -windows ; Default except Windows.
-builds: all : -windows ; All except Windows.
-builds: all : &gcc ; All with GCC only.
-builds: all : &gcc-8+ ; All with GCC 8 and up only.
-builds: gcc : -optimized ; GCC without optimization.
-builds: gcc : &( +linux +macos ) ; GCC on Linux or Mac OS.
+build target configurations that belong to its set are added to (\c{+}),
+subtracted from (\c{-}), or intersected with (\c{&}) the current set. If the
+second character in the term is \c{!}, then its set of configurations is
+inverted against the underlying set. The term itself can be either the class
+name or a parenthesized expression. Some examples (based on the
+\l{https://ci.cppget.org/?build-configs cppget.org} deployment):
+
+\
+builds: none ; None.
+builds: all ; All (suitable for libraries).
+builds: all : &host ; All host (suitable for tools).
+builds: default ; All default.
+builds: default : &host ; Default host.
+builds: default legacy ; All default and legacy.
+builds: default legacy : &host ; Default and legacy host.
+builds: -windows ; Default except Windows.
+builds: all : -windows ; All except Windows.
+builds: all : -mobile ; All except mobile.
+builds: all : &gcc ; All with GCC only.
+builds: all : &gcc-8+ ; All with GCC 8 and up only.
+builds: all : &gcc -optimized ; All GCC without optimization.
+builds: all : &gcc &( +linux +macos ) ; All GCC on Linux and Mac OS.
\
Notice that the colon and parentheses must be separated with spaces from both
@@ -1348,8 +2316,13 @@ builds: -gcc ; GCC is not supported.
builds: -clang ; Clang is not supported.
\
+\
+builds: default
+builds: -( +macos &gcc ) ; Homebrew GCC is not supported.
+\
+
\N|The \c{builds} value comments are used by the web interface (\c{brep}) to
-display the reason for the build configuration exclusion.|
+display the reason for the build target configuration exclusion.|
After evaluating all the \c{builds} values, the final configuration set can be
further fine-tuned using the \l{#manifest-package-include-exclude
@@ -1363,20 +2336,20 @@ further fine-tuned using the \l{#manifest-package-include-exclude
[build-exclude]: <config>[/<target>] [; <comment>]
\
-The package build inclusions and exclusions. The \c{build-include} and
+The common package build inclusions and exclusions. The \c{build-include} and
\c{build-exclude} values further reduce the configuration set produced by
evaluating the \l{#manifest-package-builds \c{builds}} values. The \i{config}
and \i{target} values are filesystem wildcard patterns which are matched
-against the build configuration names and target names (see the \c{bbot}
-documentation for details). In particular, the \c{*} wildcard matches zero or
-more characters within the name component while the \c{**} sequence matches
-across the components. Plus, wildcard-only pattern components match absent
-name components. For example:
+against the build target configuration names and target names (see the \l{bbot
+\c{bbot} documentation} for details). In particular, the \c{*} wildcard
+matches zero or more characters within the name component while the \c{**}
+sequence matches across the components. Plus, wildcard-only pattern components
+match absent name components. For example:
\
-build-exclude: windows** # matches windows_10-msvc_15
-build-exclude: macos*-gcc** # matches macos_10.13-gcc_8.1-O3
-build-exclude: linux-gcc*-* # matches linux-gcc_8.1 and linux-gcc_8.1-O3
+build-exclude: windows** # matches windows_10-msvc_15
+build-exclude: macos*-gcc** # matches macos_10.13-gcc_8.1-O3
+build-exclude: linux-gcc*-* # matches linux-gcc_8.1 and linux-gcc_8.1-O3
\
The exclusion and inclusion patterns are applied in the order specified with
@@ -1400,7 +2373,397 @@ build-exclude: ** ; Only supported on Linux.
\
Note that the comment of the matching exclusion is used by the web interface
-(\c{brep}) to display the reason for the build configuration exclusion.
+(\c{brep}) to display the reason for the build target configuration exclusion.
+
+
+\h2#manifest-package-build-auxiliary|\c{build-auxiliary}|
+
+\
+[build-auxiliary]: <config> [; <comment>]
+[build-auxiliary-<name>]: <config> [; <comment>]
+\
+
+The common package build auxiliary configurations. The \c{build-auxiliary}
+values can be used to specify auxiliary configurations that provide additional
+components which are required for building or testing a package and that are
+impossible or impractical to provide as part of the build configuration
+itself. For example, a package may need access to a suitably configured
+database, such as PostgreSQL, in order to run its tests. Currently no more
+than \c{9} auxiliary configurations can be specified.
+
+The \i{config} value is a filesystem wildcard pattern which is matched
+against the auxiliary configuration names (which are in turn derived from
+auxiliary machine names; see the \l{bbot \c{bbot} documentation} for
+details). In particular, the \c{*} wildcard matches zero or more characters
+within the name component while the \c{**} sequence matches across the
+components. Plus, wildcard-only pattern components match absent name
+components. For example:
+
+\
+build-auxiliary: linux_debian_12-postgresql_16
+build-auxiliary: linux_*-postgresql_*
+build-auxiliary: *-postgresql**
+\
+
+\N|If multiple auxiliary configurations match the specified pattern, then
+one is picked at random for every build.|
+
+If multiple auxiliary configurations are required, then they must be given
+distinct names with the \i{name} component. For example:
+
+\
+build-auxiliary-pgsql: *-postgresql_*
+build-auxiliary-mysql: *-mysql_*
+\
+
+Another example:
+
+\
+build-auxiliary-primary: *-postgresql_*
+build-auxiliary-secondary: *-postgresql_*
+\
+
+Auxiliary machines communicate information about their setup to the build
+machine using environment variables (see
+\l{bbot#arch-task-auxiliary-environment \c{auxiliary-environment}} for
+details). For example, an auxiliary machine that provides a test PostgreSQL
+database may need to communicate the host IP address and port on which it can
+be accessed as well as the user to login as and the database name to use. For
+example:
+
+\
+DATABASE_HOST=192.168.0.1
+DATABASE_PORT=5432
+DATABASE_USER=test
+DATABASE_NAME=test
+\
+
+If the auxiliary configuration is specified as \c{build-auxiliary-<name>},
+then the capitalized and sanitized \i{name}_ is used as a prefix in the
+environment variables corresponding to the machine. For example, for the
+auxiliary configurations specified as:
+
+\
+build-auxiliary-pg-sql: *-postgresql_*
+build-auxiliary-my-sql: *-mysql_*
+\
+
+The environment variables could be:
+
+\
+PG_SQL_DATABASE_HOST=192.168.0.1
+PG_SQL_DATABASE_PORT=5432
+...
+
+MY_SQL_DATABASE_HOST=192.168.0.2
+MY_SQL_DATABASE_PORT=3306
+...
+\
+
+The auxiliary environment variables are in effect for the entire build. The
+recommended place to propagate them to the package configuration is the
+\c{*-build-config} value. For example:
+
+\
+build-auxiliary: *-postgresql_*
+default-build-config:
+\\
+config.hello.pgsql_host=$getenv(DATABASE_HOST)
+config.hello.pgsql_port=$getenv(DATABASE_PORT)
+...
+\\
+\
+
+
+\h2#manifest-package-build-config|\c{*-build-config}|
+
+\
+[*-build-config]: <args> [; <comment>]
+
+<args> = [[[+|-]<prefix>:](<option>|<config-var>)]* \\
+ [(+|-)<prefix>:]* \\
+ [<dependency-spec>]* \\
+ [<package-specific-vars>]*
+
+<dependency-spec> = [{ <config-var> [<config-var>]* }+] <dependency>
+<dependency> = (?[sys:]|sys:)<name>[<version-spec>]
+<version-spec> = /<version> | <version-constraint>
+<package-specific-vars> = { <config-var> [<config-var>]* }+ <name>
+
+[*-builds]: <class-expr> [; <comment>]
+[*-build-include]: <config>[/<target>] [; <comment>]
+[*-build-exclude]: <config>[/<target>] [; <comment>]
+[*-build-auxiliary]: <config> [; <comment>]
+[*-build-auxiliary-<name>]: <config> [; <comment>]
+
+[*-build-email]: <email> [; <comment>]
+[*-build-warning-email]: <email> [; <comment>]
+[*-build-error-email]: <email> [; <comment>]
+\
+
+The package build configurations where the substring matched by \c{*} in
+\c{*-build-config} denotes the configuration name. If specified, then the
+package is built in these configurations by automated build bots in addition
+to the default configuration (which is called \c{default}).
+
+The \c{*-build-config} values contain whitespace separated lists of
+potentially double/single-quoted package configuration arguments. The global
+(as opposed to package-specific) options and variables can be prefixed with
+the build bot worker script step ids or a leading portion thereof to restrict
+it to a specific step, operation, phase, or tool (see \l{bbot#arch-worker
+\cb{bbot} worker step ids}). The prefix can optionally begin with the \c{+} or
+\c{-} character (in this case the argument can be omitted) to enable or
+disable the respective step (see the list of \l{bbot#arch-controller worker
+steps} which can be enabled or disabled). Unprefixed global options,
+variables, and dependencies are passed to the \l{bpkg-pkg-build(1)} command at
+the \c{bpkg.configure.build} step. The package-specific configuration
+variables for this and/or the separate test packages are passed to
+\l{bpkg-pkg-build(1)} at the \c{bpkg.configure.build} and
+\c{bpkg.test-separate-installed.configure.build} steps. For example:
+
+\
+network-build-config: config.libfoo.network=true; Enable networking API.
+
+cache-build-config:
+\\
+config.libfoo.cache=true
+config.libfoo.buffer=4096
+;
+Enable caching.
+\\
+
+libbar-network-build-config:
+\\
+{ config.libbar.network=true }+ ?libbar
+;
+Enable networking API in libbar.
+\\
+
+older-libz-build-config: \"?libz ^1.0.0\"; Test with older libz version.
+
+sys-build-config:
+\\
+?sys:libbar ?sys:libz
+;
+Test with system dependencies.
+\\
+
+bindist-build-config:
+\\
++bpkg.bindist.debian:--recursive=full
+-bbot.sys-install:
++bbot.bindist.upload:
+;
+Generate and upload binary distribution package but don't test its installation.
+\\
+
+load-tests-build-config:
+\\
+{ config.libfoo_tests.load=true }+ libfoo-tests
+;
+Enable load testing.
+\\
+\
+
+Note that options with values can only be specified using the single argument
+notation, for example, \c{--verbose=4}.
+
+The package build configuration can also override the common build target
+configurations set (specified with \l{#manifest-package-builds \c{builds}} and
+\l{#manifest-package-include-exclude \c{build-{include, exclude\}}}) by
+specifying the matching \c{*-builds} and/or \c{*-build-{include, exclude\}}
+values. For example:
+
+\
+network-builds: linux; Only supported on Linux.
+network-build-config: config.libfoo.network=true; Enable networking API.
+\
+
+Note that the common build target configurations set is overridden
+hierarchically meaning that the \c{*-build-{include, exclude\}} overrides
+don't discard the common \c{builds} values.
+
+The package build configuration can also override the common build
+notification email addresses (specified with \l{#manifest-package-build-email
+\c{build-email}}, \l{#manifest-package-warning-email \c{build-warning-email}},
+and \l{#manifest-package-error-email \c{build-error-email}}) by specifying the
+matching \c{*-build-email} and/or \c{*-build-{warning, error\}-email} values.
+For example:
+
+\
+bindist-build-config:
+\\
++bpkg.bindist.debian:--recursive=full
++bbot.bindist.upload:
+;
+Generate and upload binary distribution package.
+\\
+bindist-build-error-email: builds@example.org
+\
+
+Note that to disable all the build notification emails for a specific package
+build configuration, specify the empty \c{*-build-email} value. For example:
+
+\
+sys-build-config: ?sys:libz; Test with system dependencies.
+sys-build-email:
+\
+
+The default configuration should normally build the package with no
+configuration arguments and for the common target build configurations
+set. While not recommended, this can be overridden by using the special
+\c{default} configuration name. For example:
+
+\
+default-build-config: config.libfoo.cache=true
+\
+
+
+\h2#manifest-package-build-file|\c{build-file}|
+
+\
+[build-file]: <path>
+
+[bootstrap-build]: <text>
+[root-build]: <text>
+[*-build]: <text>
+
+[bootstrap-build2]: <text>
+[root-build2]: <text>
+[*-build2]: <text>
+\
+
+The contents of the mandatory \c{bootstrap.build} file, optional
+\c{root.build} file, and additional files included by \c{root.build}, or their
+alternative naming scheme variants (\c{bootstrap.build2}, etc). Packages with
+the alternative naming scheme should use the \c{*-build2} values instead of
+\c{*-build}. See \l{#package-skeleton Package Build System Skeleton} for
+background.
+
+These files must reside in the package's \c{build/} subdirectory and have the
+\c{.build} extension (or their alternative names). They can be provided either
+inline as text fragments or, for additional files, by referring to them with a
+path relative to this subdirectory, but not both. The \c{*-build}/\c{*-build2}
+manifest value name prefixes must be the file paths relative to this
+subdirectory with the extension stripped.
+
+As an example, the following values correspond to the
+\c{build/config/common.build} file:
+
+\
+build-file: config/common.build
+
+config/common-build:
+\\
+config [bool] config.libhello.fancy ?= false
+\\
+\
+
+And the following values correspond to the \c{build2/config/common.build2}
+file in a package with the alternative naming scheme:
+
+\
+build-file: config/common.build2
+
+config/common-build2:
+\\
+config [bool] config.libhello.fancy ?= false
+\\
+\
+
+If unspecified, then the package's \c{bootstrap.build}, \c{root.build}, and
+\c{build/config/*.build} files (or their alternative names) will be
+automatically added, for example, when the \l{#manifest-package-list-pkg
+package list manifest} is created.
+
+
+\h2#manifest-package-distribution|\c{*-{name, version, to-downstream-version\}}|
+
+\
+[<distribution>-name]: <name> [<name>...]
+[<distribution>-version]: <string>
+[<distribution>-to-downstream-version]: <regex>
+
+<distribution> = <name>[_<version>]
+<regex> = /<pattern>/<replacement>/
+\
+
+The binary distribution package name and version mapping. The \c{-name} value
+specifies the distribution package(s) this \c{bpkg} package maps to. If
+unspecified, then appropriate name(s) are automatically derived from the
+\c{bpkg} package name (\l{#manifest-package-name \c{name}}). Similarly, the
+\c{-version} value specifies the distribution package version. If unspecified,
+then the \c{upstream-version} value is used if specified and the \c{bpkg}
+version (\l{#manifest-package-version \c{version}}) otherwise. While the
+\c{-to-downstream-version} values specify the reverse mapping, that is, from
+the distribution version to the \c{bpkg} version. If unspecified or none
+match, then the appropriate part of the distribution version is used. For
+example:
+
+\
+name: libssl
+version: 1.1.1+18
+debian-name: libssl1.1 libssl-dev
+debian-version: 1.1.1n
+debian-to-downstream-version: /1\.1\.1[a-z]/1.1.1/
+debian-to-downstream-version: /([3-9])\.([0-9]+)\.([0-9]+)/\1.\2.\3/
+\
+
+If \c{upstream-version} is specified but the distribution package version
+should be the same as the \c{bpkg} package version, then the special \c{$}
+\c{-version} value can be used. For example:
+
+\
+debian-version: $
+\
+
+The \c{<distribution>} name prefix consists of the distribution name followed
+by the optional distribution version. If the version is omitted, then the
+value applies to all versions. Some examples of distribution names and
+versions:
+
+\
+debian
+debian_10
+ubuntu_16.04
+fedora_32
+rhel_8.5
+freebsd_12.1
+windows_10
+macos_10
+macos_10.15
+macos_12
+\
+
+Note also that some distributions are like others (for example, \c{ubuntu} is
+like \c{debian}) and the corresponding \"base\" distribution values are
+considered if no \"derived\" values are specified.
+
+The \c{-name} value is used both during package consumption as a system
+package and production with the \l{bpkg-pkg-bindist(1)} command. During
+production, if multiple mappings match, then the value with the highest
+matching distribution version from the package \c{manifest} with the latest
+version is used. If it's necessary to use different names for the generated
+binary packages (called \"non-native packages\" in contrast to \"native
+packages\" that come from the distribution), the special \c{0} distribution
+version can be used to specify such a mapping. For example:
+
+\
+name: libsqlite3
+debian_9-name: libsqlite3-0 libsqlite3-dev
+debian_0-name: libsqlite3 libsqlite3-dev
+\
+
+Note that this special non-native mapping is ignored during consumption and a
+deviation in the package names that it introduces may make it impossible to
+use native and non-native binary packages interchangeably, for example, to
+satisfy dependencies.
+
+
+The exact format of the \c{-name} and \c{-version} values and the distribution
+version part that is matched against the \c{-to-downstream-version} pattern
+are distribution-specific. For details, see \l{#bindist-mapping-debian Debian
+Package Mapping} and \l{#bindist-mapping-fedora Fedora Package Mapping}.
\h#manifest-package-list-pkg|Package List Manifest for \cb{pkg} Repositories|
@@ -1419,7 +2782,8 @@ After the list manifest comes a (potentially empty) sequence of package
manifests. These manifests shall not contain any \c{*-file} or incomplete
\l{#manifest-package-depends \c{depends}} values (such values should be
converted to their inline versions or completed, respectively) but must
-contain the following additional (to package manifest) values:
+contain the \c{*-build} values (unless the corresponding files are absent) and
+the following additional (to package manifest) values:
\
location: <path>
@@ -1741,12 +3105,12 @@ The repository fragment id this repository belongs to.
terminology and semantics.
The repository list manifest (the \c{repositories.manifest} file found in the
-repository root directory) describes the repository. It is a sequence of
-repository manifests consisting of the base repository manifest (that is, the
-manifest for the repository that is being described) as well as manifests for
-its prerequisite and complement repositories. The individual repository
-manifests can appear in any order and the base repository manifest can be
-omitted.
+repository root directory) describes the repository. It starts with an
+optional header manifest optionally followed by a sequence of repository
+manifests consisting of the base repository manifest (that is, the manifest
+for the repository that is being described) as well as manifests for its
+prerequisite and complement repositories. The individual repository manifests
+can appear in any order and the base repository manifest can be omitted.
The \c{fragment} values can only be present in a merged
\c{repositories.manifest} file for a multi-fragment repository.
@@ -1758,6 +3122,8 @@ repository could look like this:
# math/testing
#
: 1
+min-bpkg-version: 0.14.0
+:
email: math-pkg@example.org
summary: Math package repository
:
@@ -1783,6 +3149,37 @@ Then the completement's location would be:
https://pkg.example.org/1/math/stable
\
+The header manifest synopsis is presented next followed by the detailed
+description of each value in subsequent sections.
+
+\
+[min-bpkg-version]: <ver>
+[compression]: <compressions>
+\
+
+\h2#manifest-repository-list-header-min-bpkg-version|\c{min-bpkg-version}|
+
+\
+[min-bpkg-version]: <ver>
+\
+
+The earliest version of \cb{bpkg} that is compatible with this repository.
+Note that if specified, it must be the first value in the header.
+
+
+\h2#manifest-repository-list-header-compression|\c{compression}|
+
+\
+[compression]: <compressions>
+
+<compressions> = <compression> [ <compression>]*
+\
+
+Available compressed variants of the \c{packages.manifest} file. The format is
+a space-separated list of the compression methods. The \c{none} method means
+no compression. Absent \c{compression} value is equivalent to specifying it
+with the \c{none} value.
+
\h#manifest-signature-pkg|Signature Manifest for \cb{pkg} Repositories|
@@ -1822,6 +3219,469 @@ signature: <sig>
The signature of the \c{packages.manifest} file. It should be calculated by
encrypting the above \c{sha256sum} value with the repository certificate's
private key and then \c{base64}-encoding the result.
+
+
+\h1#bindist-mapping|Binary Distribution Package Mapping|
+
+
+\h#bindist-mapping-debian|Debian Package Mapping|
+
+This section describes the distribution package mapping for Debian and
+alike (Ubuntu, etc).
+
+\h2#bindist-mapping-debian-consume|Debian Package Mapping for Consumption|
+
+A library in Debian is normally split up into several packages: the shared
+library package (e.g., \c{libfoo1} where \c{1} is the ABI version), the
+development files package (e.g., \c{libfoo-dev}), the documentation files
+package (e.g., \c{libfoo-doc}), the debug symbols package (e.g.,
+\c{libfoo1-dbg}), and the architecture-independent files (e.g.,
+\c{libfoo1-common}). All the packages except \c{-dev} are optional and there
+is quite a bit of variability. Here are a few examples:
+
+\
+libsqlite3-0 libsqlite3-dev
+
+libssl1.1 libssl-dev libssl-doc
+libssl3 libssl-dev libssl-doc
+
+libcurl4 libcurl4-openssl-dev libcurl4-doc
+libcurl3-gnutls libcurl4-gnutls-dev libcurl4-doc
+\
+
+Note that while most library package names in Debian start with \c{lib} (per
+the policy), there are exceptions (e.g., \c{zlib1g} \c{zlib1g-dev}). The
+header-only library package names may or may not start with \c{lib} and end
+with \c{-dev} (e.g., \c{libeigen3-dev}, \c{rapidjson-dev}, \c{catch2}). Also
+note that manual \c{-dbg} packages are obsolete in favor of automatic
+\c{-dbgsym} packages from Debian 9.
+
+For executable packages there is normally no \c{-dev} packages but \c{-dbg},
+\c{-doc}, and \c{-common} are plausible.
+
+Based on that, our approach when trying to automatically map a \c{bpkg}
+library package name to Debian package names is to go for the \c{-dev} package
+first and figure out the shared library package from that based on the fact
+that the \c{-dev} package should have the \c{==} dependency on the shared
+library package with the same version and its name should normally start with
+the \c{-dev} package's stem.
+
+The format of the \c{debian-name} (or alike) manifest value is a
+comma-separated list of one or more package groups:
+
+\
+<package-group> [, <package-group>...]
+\
+
+Where each \c{<package-group>} is the space-separated list of one or more
+package names:
+
+\
+<package-name> [ <package-name>...]
+\
+
+All the packages in the group should be \"package components\" (for the lack
+of a better term) of the same \"logical package\", such as \c{-dev}, \c{-doc},
+\c{-common} packages. They normally have the same version.
+
+The first group is called the main group and the first package in the
+group is called the main package. Note that all the groups are consumed
+(installed) but only the main group is produced (packaged).
+
+We allow/recommend specifying the \c{-dev} package instead of the main package
+for libraries (see \l{#manifest-package-type-language \c{type}} for details),
+seeing that we are capable of detecting the main package automatically (see
+above). If the library name happens to end with \c{-dev} (which poses an
+ambiguity), then the \c{-dev} package should be specified explicitly as the
+second package to disambiguate this situation.
+
+The Debian package version has the \c{[<epoch>:]<upstream>[-<revision>]} form
+(see \cb{deb-version(5)} for details). If no explicit mapping to the \c{bpkg}
+version is specified with the \c{debian-to-downstream-version} (or alike)
+manifest values or none match, then we fallback to using the \c{<upstream>}
+part as the \c{bpkg} version. If explicit mapping is specified, then we match
+it against the \c{[<epoch>:]<upstream>} parts ignoring \c{<revision>}.
+
+
+\h2#bindist-mapping-debian-produce|Debian Package Mapping for Production|
+
+The same \c{debian-name} (or alike) manifest values as used for consumption
+are also used to derive the package names for production except here we have
+the option to specify alternative non-native package names using the special
+\c{debian_0-name} (or alike) value. If only the \c{-dev} package is specified,
+then the main package name is derived from that by removing the \c{-dev}
+suffix. Note that regardless of whether the main package name is specified or
+not, the \l{bpkg-pkg-bindist(1)} command may omit generating the main package
+for a binless library.
+
+The generated binary package version can be specified with the
+\c{debian-version} (or alike) manifest value. If it's not specified, then the
+\c{upstream-version} is used if specified. Otherwise, the \c{bpkg} version
+is translated to the Debian version as described next.
+
+To recap, a Debian package version has the following form:
+
+\
+[<epoch>:]<upstream>[-<revision>]
+\
+
+For details on the ordering semantics, see the \c{Version} \c{control} file
+field documentation in the Debian Policy Manual. While overall unsurprising,
+one notable exception is \c{~}, which sorts before anything else and is
+commonly used for upstream pre-releases. For example, \c{1.0~beta1~svn1245}
+sorts earlier than \c{1.0~beta1}, which sorts earlier than \c{1.0}.
+
+There are also various special version conventions (such as all the revision
+components in \c{1.4-5+deb10u1~bpo9u1}) but they all appear to express
+relationships between native packages and/or their upstream and thus do not
+apply to our case.
+
+To recap, the \c{bpkg} version has the following form (see
+\l{#package-version Package Version} for details):
+
+\
+[+<epoch>-]<upstream>[-<prerel>][+<revision>]
+\
+
+Let's start with the case where neither distribution (\c{debian-version}) nor
+upstream version (\c{upstream-version}) is specified and we need to derive
+everything from the \c{bpkg} version (what follows is as much description as
+rationale).
+
+\dl|
+
+\li|\c{<epoch>}
+
+ On one hand, if we keep our (as in, \c{bpkg}) epoch, it won't necessarily
+ match Debian's native package epoch. But on the other it will allow our
+ binary packages from different epochs to co-exist. Seeing that this can be
+ easily overridden with a custom distribution version (see below), we keep
+ it.
+
+ Note that while the Debian start/default epoch is 0, ours is 1 (we use the 0
+ epoch for stub packages). So we shift this value range.|
+
+\li|\c{<upstream>[-<prerel>]}
+
+ Our upstream version maps naturally to Debian's. That is, our upstream
+ version format/semantics is a subset of Debian's.
+
+ If this is a pre-release, then we could fail (that is, don't allow
+ pre-releases) but then we won't be able to test on pre-release packages, for
+ example, to make sure the name mapping is correct. Plus sometimes it's
+ useful to publish pre-releases. We could ignore it, but then such packages
+ will be indistinguishable from each other and the final release, which is
+ not ideal. On the other hand, Debian has the mechanism (\c{~}) which is
+ essentially meant for this, so we use it. We will use \c{<prerel>} as is
+ since its format is the same as upstream and thus should map naturally.|
+
+\li|\c{<revision>}
+
+ Similar to epoch, our revision won't necessarily match Debian's native
+ package revision. But on the other hand it will allow us to establish a
+ correspondence between source and binary packages. Plus, upgrades between
+ binary package revisions will be handled naturally. Seeing that we allow
+ overriding the revision with a custom distribution version (see below),
+ we keep it.
+
+ Note also that both Debian and our revision start/default is 0. However, it
+ is Debian's convention to start revision from 1. But it doesn't seem worth
+ it for us to do any shifting here and so we will use our revision as is.
+
+ Another related question is whether we should also include some metadata
+ that identifies the distribution and its version that this package is
+ for. The strongest precedent here is probably Ubuntu's PPA. While there
+ doesn't appear to be a consistent approach, one can often see versions like
+ these:
+
+ \
+ 2.1.0-1~ppa0~ubuntu14.04.1,
+ 1.4-5-1.2.1~ubuntu20.04.1~ppa1
+ 22.12.2-0ubuntu1~ubuntu23.04~ppa1
+ \
+
+ Seeing that this is a non-sortable component (what in semver would be called
+ \"build metadata\"), using \c{~} is probably not the worst choice.
+
+ So we follow this lead and add the \c{~<ID><VERSION_ID>} \c{os-release(5)}
+ component to revision. Note that this also means we will have to make the 0
+ revision explicit. For example:
+
+ \
+ 1.2.3-1~debian10
+ 1.2.3-0~ubuntu20.04
+ \
+
+||
+
+The next case to consider is when we have the upstream version
+(\c{upstream-version} manifest value). After some rumination it feels correct
+to use it in place of the \c{<epoch>-<upstream>} components in the above
+mapping (upstream version itself cannot have epoch). In other words, we will
+add the pre-release and revision components from the \c{bpkg} version. If this
+is not the desired semantics, then it can always be overridden with the
+distribution version (see below).
+
+Finally, we have the distribution version. The Debian \c{<epoch>} and
+\c{<upstream>} components are straightforward: they should be specified by the
+distribution version as required. This leaves pre-release and revision. It
+feels like in most cases we would want these copied over from the \c{bpkg}
+version automatically \- it's too tedious and error-prone to maintain them
+manually. However, we want the user to have the full override ability. So
+instead, if empty revision is specified, as in \c{1.2.3-}, then we
+automatically add the \c{bpkg} revision. Similarly, if empty pre-release is
+specified, as in \c{1.2.3~}, then we add the \c{bpkg} pre-release. To add both
+automatically, we would specify \c{1.2.3~-} (other combinations are
+\c{1.2.3~b.1-} and \c{1.2.3~-1}).
+
+Note also that per the Debian version specification, if upstream contains
+\c{:} and/or \c{-}, then epoch and/or revision must be specified explicitly,
+respectively. Note that the \c{bpkg} upstream version may not contain either.
+
+
+\h#bindist-mapping-fedora|Fedora Package Mapping|
+
+This section describes the distribution package mapping for Fedora and alike
+(Red Hat Enterprise Linux, Centos, etc).
+
+\h2#bindist-mapping-fedora-consume|Fedora Package Mapping for Consumption|
+
+A library in Fedora is normally split up into several packages: the shared
+library package (e.g., \c{libfoo}), the development files package (e.g.,
+\c{libfoo-devel}), the static library package (e.g., \c{libfoo-static}; may
+also be placed into the \c{-devel} package), the documentation files package
+(e.g., \c{libfoo-doc}), the debug symbols and source files packages (e.g.,
+\c{libfoo-debuginfo} and \c{libfoo-debugsource}), and the common or
+architecture-independent files (e.g., \c{libfoo-common}). All the packages
+except \c{-devel} are optional and there is quite a bit of variability. In
+particular, the \c{lib} prefix in \c{libfoo} is not a requirement (unlike in
+Debian) and is normally present only if upstream name has it (see some
+examples below).
+
+For application packages there is normally no \c{-devel} packages but
+\c{-debug*}, \c{-doc}, and \c{-common} are plausible.
+
+For mixed packages which include both applications and libraries, the shared
+library package normally has the \c{-libs} suffix (e.g., \c{foo-libs}).
+
+A package name may also include an upstream version based suffix if
+multiple versions of the package can be installed simultaneously (e.g.,
+\c{libfoo1.1} \c{libfoo1.1-devel}, \c{libfoo2} \c{libfoo2-devel}).
+
+Terminology-wise, the term \"base package\" (sometime also \"main package\")
+normally refers to either the application or shared library package (as
+decided by the package maintainer in the spec file) with the suffixed packages
+(\c{-devel}, \c{-doc}, etc) called \"subpackages\".
+
+Here are a few examples:
+
+\
+libpq libpq-devel
+
+zlib zlib-devel zlib-static
+
+catch-devel
+
+eigen3-devel eigen3-doc
+
+xerces-c xerces-c-devel xerces-c-doc
+
+libsigc++20 libsigc++20-devel libsigc++20-doc
+libsigc++30 libsigc++30-devel libsigc++30-doc
+
+icu libicu libicu-devel libicu-doc
+
+openssl openssl-libs openssl-devel openssl-static
+openssl1.1 openssl1.1-devel
+
+curl libcurl libcurl-devel
+
+sqlite sqlite-libs sqlite-devel sqlite-doc
+
+community-mysql community-mysql-libs community-mysql-devel
+community-mysql-common community-mysql-server
+
+ncurses ncurses-libs ncurses-c++-libs ncurses-devel ncurses-static
+
+keyutils keyutils-libs keyutils-libs-devel
+\
+
+Note that while we support arbitrary \c{-debug*} sub-package names for
+consumption, we only generate \c{<main-package>-debug*}.
+
+Based on that, our approach when trying to automatically map a \c{bpkg}
+library package name to Fedora package names is to go for the \c{-devel}
+package first and figure out the shared library package from that based on the
+fact that the \c{-devel} package should have the \c{==} dependency on the
+shared library package with the same version and its name should normally
+start with the \c{-devel} package's stem and potentially end with the
+\c{-libs} suffix. If we fail to find the \c{-devel} package, we re-try but now
+using the \c{bpkg} project name instead of the package name (see, for example,
+\c{openssl}, \c{sqlite}).
+
+The format of the \c{fedora-name} (or alike) manifest value is a
+comma-separated list of one or more package groups:
+
+\
+<package-group> [, <package-group>...]
+\
+
+Where each \c{<package-group>} is the space-separated list of one or more
+package names:
+
+\
+<package-name> [ <package-name>...]
+\
+
+All the packages in the group should belong to the same \"logical package\",
+such as \c{-devel}, \c{-doc}, \c{-common} packages. They normally have the
+same version.
+
+The first group is called the main group and the first package in the
+group is called the main package. Note that all the groups are consumed
+(installed) but only the main group is produced (packaged).
+
+(Note that above we use the term \"logical package\" instead of \"base
+package\" since the main package may not be the base package, for example
+being the \c{-libs} subpackage.)
+
+We allow/recommend specifying the \c{-devel} package instead of the main
+package for libraries (see \l{#manifest-package-type-language \c{type}} for
+details), seeing that we are capable of detecting the main package
+automatically (see above). If the library name happens to end with \c{-devel}
+(which poses an ambiguity), then the \c{-devel} package should be specified
+explicitly as the second package to disambiguate this situation.
+
+The Fedora package version has the \c{[<epoch>:]<version>-<release>} form (see
+Fedora Package Versioning Guidelines for details). If no explicit mapping
+to the \c{bpkg} version is specified with the \c{fedora-to-downstream-version}
+(or alike) manifest values or none match, then we fallback to using the
+\c{<version>} part as the \c{bpkg} version. If explicit mapping is specified,
+then we match it against the \c{[<epoch>:]<version>} parts ignoring
+\c{<release>}.
+
+
+\h2#bindist-mapping-fedora-produce|Fedora Package Mapping for Production|
+
+The same \c{fedora-name} (or alike) manifest values as used for consumption
+are also used to derive the package names for production except here we have
+the option to specify alternative non-native package names using the special
+\c{fedora_0-name} (or alike) value. If only the \c{-devel} package is
+specified, then the main package name is derived from that by removing the
+\c{-devel} suffix. Note that regardless of whether the main package name is
+specified or not, the \l{bpkg-pkg-bindist(1)} command may omit generating the
+main package for a binless library.
+
+The generated binary package version can be specified with the
+\c{fedora-version} (or alike) manifest value. If it's not specified, then the
+\c{upstream-version} is used if specified. Otherwise, the \c{bpkg} version
+is translated to the Fedora version as described next.
+
+To recap, a Fedora package version has the following form:
+
+\
+[<epoch>:]<version>-<release>
+\
+
+Where \c{<release>} has the following form:
+
+\
+<release-number>[.<distribution-tag>]
+\
+
+For details on the ordering semantics, see the Fedora Versioning Guidelines.
+While overall unsurprising, the only notable exceptions are \c{~}, which sorts
+before anything else and is commonly used for upstream pre-releases, and
+\c{^}, which sorts after anything else and is supposedly used for upstream
+post-release snapshots. For example, \c{0.1.0~alpha.1-1.fc35} sorts earlier
+than \c{0.1.0-1.fc35}.
+
+To recap, the bpkg version has the following form (see
+\l{#package-version Package Version} for details):
+
+\
+[+<epoch>-]<upstream>[-<prerel>][+<revision>]
+\
+
+Let's start with the case where neither distribution (\c{fedora-version}) nor
+upstream version (\c{upstream-version}) is specified and we need to derive
+everything from the \c{bpkg} version (what follows is as much description as
+rationale).
+
+\dl|
+
+\li|\c{<epoch>}
+
+ On one hand, if we keep our (as in, \c{bpkg}) epoch, it won't necessarily
+ match Fedora's native package epoch. But on the other hand it will allow our
+ binary packages from different epochs to co-exist. Seeing that this can be
+ easily overridden with a custom distribution version (see below), we keep
+ it.
+
+ Note that while the Fedora start/default epoch is 0, ours is 1 (we use the 0
+ epoch for stub packages). So we shift this value range.|
+
+\li|\c{<upstream>[-<prerel>]}
+
+ Our upstream version maps naturally to Fedora's \c{<version>}. That is, our
+ upstream version format/semantics is a subset of Fedora's \c{<version>}.
+
+ If this is a pre-release, then we could fail (that is, don't allow
+ pre-releases) but then we won't be able to test on pre-release packages, for
+ example, to make sure the name mapping is correct. Plus sometimes it's
+ useful to publish pre-releases. We could ignore it, but then such packages
+ will be indistinguishable from each other and the final release, which is
+ not ideal. On the other hand, Fedora has the mechanism (\c{~}) which is
+ essentially meant for this, so we use it. We will use \c{<prerel>} as is
+ since its format is the same as \c{<upstream>} and thus should map
+ naturally.|
+
+\li|\c{<revision>}
+
+ Similar to epoch, our revision won't necessarily match Fedora's native
+ package release number. But on the other hand it will allow us to establish a
+ correspondence between source and binary packages. Plus, upgrades between
+ binary package releases will be handled naturally. Also note that the
+ revision is mandatory in Fedora. Seeing that we allow overriding the
+ releases with a custom distribution version (see below), we use it.
+
+ Note that the Fedora start release number is 1 and our revision is 0. So we
+ shift this value range.
+
+ Also we automatically add the trailing distribution tag (\c{.fc35},
+ \c{.el8}, etc) to the Fedora release. The tag is deduced automatically
+ unless overridden on the command line (see \l{bpkg-pkg-bindist(1)} command
+ for details).
+
+||
+
+The next case to consider is when we have the upstream version
+(\c{upstream-version} manifest value). After some rumination it feels correct
+to use it in place of the \c{<epoch>-<upstream>} components in the above
+mapping (upstream version itself cannot have epoch). In other words, we will
+add the pre-release and revision components from the \c{bpkg} version. If this
+is not the desired semantics, then it can always be overridden with the
+distribution version (see below).
+
+Finally, we have the distribution version. The Fedora \c{<epoch>} and
+\c{<version>} components are straightforward: they should be specified by the
+distribution version as required. This leaves pre-release and release. It
+feels like in most cases we would want these copied over from the \c{bpkg}
+version automatically \- it's too tedious and error-prone to maintain them
+manually. However, we want the user to have the full override ability. So
+instead, if empty release is specified, as in \c{1.2.3-}, then we
+automatically add the \c{bpkg} revision. Similarly, if empty pre-release is
+specified, as in \c{1.2.3~}, then we add the \c{bpkg} pre-release. To add both
+automatically, we would specify \c{1.2.3~-} (other combinations are
+\c{1.2.3~b.1-} and \c{1.2.3~-1}). If specified, the release must not contain
+the distribution tag, since it is deduced automatically unless overridden on
+the command line (see \l{bpkg-pkg-bindist(1)} command for details). Also,
+since the release component is mandatory in Fedora, if it is omitted together
+with the separating dash we will add the release 1 automatically.
+
+Note also that per the RPM spec file format documentation neither version nor
+release components may contain \c{:} or \c{-}. Note that the \c{bpkg} upstream
+version may not contain either.
"
//@@ TODO items (grep).
diff --git a/doc/style b/doc/style
-Subproject 10f31a8bea8e5817fccf01978009c1ecaf3eabf
+Subproject b72eb624d13b1628e27e9f6c0b3c80853e8e015
diff --git a/manifest b/manifest
index e42542b..e826689 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: bpkg
-version: 0.14.0-a.0.z
+version: 0.17.0-a.0.z
project: build2
summary: build2 package dependency manager
license: MIT
@@ -12,13 +12,15 @@ doc-url: https://build2.org/doc.xhtml
src-url: https://git.build2.org/cgit/bpkg/tree/
email: users@build2.org
build-warning-email: builds@build2.org
-builds: all
+builds: all : &host
requires: c++14
-depends: * build2 >= 0.13.0
-depends: * bpkg >= 0.13.0
-# @@ Should probably become conditional dependency.
-requires: ? cli ; Only required if changing .cli files.
-depends: libodb [2.5.0-b.20.1 2.5.0-b.21)
-depends: libodb-sqlite [2.5.0-b.20.1 2.5.0-b.21)
-depends: libbutl [0.14.0-a.0.1 0.14.0-a.1)
-depends: libbpkg [0.14.0-a.0.1 0.14.0-a.1)
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
+# @@ DEP Should probably become conditional dependency.
+#requires: ? cli ; Only required if changing .cli files.
+depends: libodb [2.5.0-b.26.1 2.5.0-b.27)
+depends: libodb-sqlite [2.5.0-b.26.1 2.5.0-b.27)
+depends: libsqlite3 ^3.21.0 ; ATTACH in transaction
+depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbpkg [0.17.0-a.0.1 0.17.0-a.1)
+depends: build2 [0.17.0-a.0.1 0.17.0-a.1)
diff --git a/repositories.manifest b/repositories.manifest
index b296991..29cb1cf 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -3,6 +3,10 @@ summary: build2 package dependency manager repository
:
role: prerequisite
+location: ../build2.git##HEAD
+
+:
+role: prerequisite
location: ../libbutl.git##HEAD
:
@@ -11,6 +15,10 @@ location: ../libbpkg.git##HEAD
:
role: prerequisite
+location: https://git.build2.org/packaging/sqlite/sqlite.git##HEAD
+
+:
+role: prerequisite
location: https://git.codesynthesis.com/odb/libodb.git##HEAD
:
diff --git a/tests/.gitignore b/tests/.gitignore
index 35ec43f..a6332dd 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -1,2 +1,4 @@
-test/
-test-*/
+# Note: could be symlinks to directories (so no trailing /).
+#
+test
+test-*
diff --git a/tests/auth.testscript b/tests/auth.testscript
index 5dd8769..4e7b0c9 100644
--- a/tests/auth.testscript
+++ b/tests/auth.testscript
@@ -17,6 +17,6 @@ cert = $src_base/auth/default-cert.pem
# file to sign the repository.
#
cert_manifest = $~/cert-manifest
-+echo 'certificate: \' >=$cert_manifest
-+cat <<<$cert >+$cert_manifest
-+echo '\' >+$cert_manifest
++echo 'certificate:\' >=$cert_manifest
++cat <<<$cert >+$cert_manifest
++echo '\' >+$cert_manifest
diff --git a/tests/auth/cert b/tests/auth/cert
index b246d87..9355af8 100755
--- a/tests/auth/cert
+++ b/tests/auth/cert
@@ -4,25 +4,28 @@
#
# openssl genrsa 4096 > key.pem
-openssl req -x509 -new -key key.pem -days 36500 -config default-openssl.cnf > \
+# Note that for glibc versions prior to 2.34 there is an issue on i686 with
+# using certificates with expiration date beyond 2038.
+#
+openssl req -x509 -new -key key.pem -days 5475 -config default-openssl.cnf > \
default-cert.pem
cat default-cert.pem | openssl x509 -sha256 -noout -fingerprint | \
sed -n 's/^SHA256 Fingerprint=\(.*\)$/\1/p' >default-cert-fp
-openssl req -x509 -new -key key.pem -days 36500 -config mismatch-openssl.cnf > \
+openssl req -x509 -new -key key.pem -days 5475 -config mismatch-openssl.cnf > \
mismatch-cert.pem
-openssl req -x509 -new -key key.pem -days 36500 -config noemail-openssl.cnf > \
+openssl req -x509 -new -key key.pem -days 5475 -config noemail-openssl.cnf > \
noemail-cert.pem
-openssl req -x509 -new -key key.pem -days 36500 \
+openssl req -x509 -new -key key.pem -days 5475 \
-config subdomain-openssl.cnf > subdomain-cert.pem
-openssl req -x509 -new -key key.pem -days 36500 -config self-openssl.cnf > \
+openssl req -x509 -new -key key.pem -days 5475 -config self-openssl.cnf > \
self-cert.pem
-openssl req -x509 -new -key key.pem -days 36500 -config self-any-openssl.cnf > \
+openssl req -x509 -new -key key.pem -days 5475 -config self-any-openssl.cnf > \
self-any-cert.pem
# Normally, you have no reason to regenerate expired-cert.pem, as need to keep
@@ -33,7 +36,7 @@ openssl req -x509 -new -key key.pem -days 36500 -config self-any-openssl.cnf > \
# To regenerate the packages and signature manifest files run bpkg rep-create
# command, for example:
#
-# ../../bpkg/bpkg rep-create ../rep-auth/expired --key key.pem
+# bpkg rep-create ../rep-auth/expired --key key.pem
#
# We cannot do it in the testscript since the certificate has expired. This is
# also the reason why we store these auto-generated manifests in git.
diff --git a/tests/auth/default-cert-fp b/tests/auth/default-cert-fp
index beeb982..c2bbddb 100644
--- a/tests/auth/default-cert-fp
+++ b/tests/auth/default-cert-fp
@@ -1 +1 @@
-5A:B3:EE:2C:8C:72:E1:AC:03:D2:46:5E:39:CF:FB:0B:14:64:09:17:23:3D:50:79:54:A5:63:E3:A1:B5:51:60
+85:6F:A0:09:32:5D:9A:F3:97:24:7C:2F:61:FD:F8:D7:56:22:2A:23:57:1A:F2:DE:F1:65:2B:18:52:DA:4F:FB
diff --git a/tests/auth/default-cert.pem b/tests/auth/default-cert.pem
index 6fc8b69..5f696dd 100644
--- a/tests/auth/default-cert.pem
+++ b/tests/auth/default-cert.pem
@@ -1,30 +1,30 @@
-----BEGIN CERTIFICATE-----
-MIIFPDCCAySgAwIBAgIUB0O+guP5NDOoha3khvVPRIMgNeEwDQYJKoZIhvcNAQEL
+MIIFOjCCAyKgAwIBAgIUCGfmbqwLQP1acZ1nH47yy4PRY+UwDQYJKoZIhvcNAQEL
BQAwMzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxGDAWBgNVBAMMD25hbWU6YnVp
-bGQyLm9yZzAgFw0yMTAzMDgwODMyNTBaGA8yMTIxMDIxMjA4MzI1MFowMzEXMBUG
-A1UECgwOQ29kZSBTeW50aGVzaXMxGDAWBgNVBAMMD25hbWU6YnVpbGQyLm9yZzCC
-AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANq78SXuzFzCMoFU1RnzEeAf
-zE0UUYGynS3F2lG7viH3coxjLt+BrFBudVs4XDTpjXS19hRxIohEgD71W1jhDvmU
-C9yCMW13PCIIjRKTTz0efEhTcMHdhOgvKZsje0IV7svoKVXcG7DfUVl51wWPQPSb
-UrfsQbsXg7Pz5HaDx+Dt2i9hwdE1M0z4R2dtwQkszFyCKiX8RF9oPXirTz5ETLC3
-f19JUapLrY5l5ZylzQifLhPMlHLlrT0n7KkohH7waX3KyeLa0M2IIl3zaeAsuN+E
-rFVecAdlJIvX00cth2OO/Gxy09sIKlagi2q7ZDik2sMvG8dAv7gNZsXp+FOj/XXC
-iOI9f6D5ospJdK9B5UCABjmGc8W5Odv6ZLey5Ui76luI7ciITOKfAoEkbyMiNHiR
-xLdM7aAeizdcwHU4bm6JlmiJk8UyyV85f33mvCSfuo7D+DQYiK650/xwRdTFBIqi
-38IwME62gT7ah/AOmiPshj7FjwIU7ZWHskyr9qpExQOEKJXoLZJo1rf6MRc8AsJy
-z6zdfQhT1BTzhogNfru4xjVM6fSrjRUF34msuWcz/HKo9W350Aw2y5F59kziP+m7
-G6uBYrqmElv/13Vamg2ZZ1b38KMz5Ss3SkfcDErOzz/D+0hRlOaCIeWts1G2zWcQ
-vBnn+zGA+sTIu0xAFOCRAgMBAAGjRjBEMA4GA1UdDwEB/wQEAwIHgDAWBgNVHSUB
-Af8EDDAKBggrBgEFBQcDAzAaBgNVHREEEzARgQ9pbmZvQGJ1aWxkMi5vcmcwDQYJ
-KoZIhvcNAQELBQADggIBAMlCmZJOMObdZIHqDDVb0F3//a3i/wjTH8GFwq3kq5PV
-nea3TUU5I0h8j0lXMgjXXCFJgCWhTW9b1gYs13gzMNOXRfib8VHD6qcJaDYTINun
-o+NQadod+psFaZ+UICex10V8sd4vFDbD+QZ5OAinLgQcvUxP/cLZ5WtKLJSzsG/5
-0jc7BdnQlhx708yWWYC7JP5McDtS3ffR5K5Dd2eaUEREgdQIN+r1zlnP1sluZh3r
-MWNWTLwyVDpB6L2g+XO26D9E6kOSDInqwo+XbryPgZ5Q7W/mIvEzSQ/Sk9oWBOEp
-WZm2rIDBZLg+hQP0Xnw15briV+wD9pVxZ0UTJ3qKitT7T6P59V2IVM0rKTUI+5+A
-ttylRagQ34MDiOxrhLxA+dhEQt9VsbxAmY5PIYpA3CtngkRy3H1RMLeMEBUVfsU8
-X2Po5f+QiUCraSblDXoUVVVyNiOa3KZNzDuvvmSKgODtG4v8NNU1mTFAY8gG2Mqj
-WDLvDLuJU1IzC+GHYbEWr170BXyg1qDYoFVrSOwJf60nK9zqt00j46MkawrbzGLU
-vV0sj0EV92EEkglHKMye4kF2Qbmej+Cbtn4IQlklnrvxevpm6tz2IZnrHMoh8+PY
-W/Dc668orS23u7OVLeAVYV+Z6i2MwN+2IjD/noSDa24zhcxM63bcpAFFyQL+21GK
+bGQyLm9yZzAeFw0yMjAxMjgxNTM5MzNaFw0zNzAxMjQxNTM5MzNaMDMxFzAVBgNV
+BAoMDkNvZGUgU3ludGhlc2lzMRgwFgYDVQQDDA9uYW1lOmJ1aWxkMi5vcmcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDau/El7sxcwjKBVNUZ8xHgH8xN
+FFGBsp0txdpRu74h93KMYy7fgaxQbnVbOFw06Y10tfYUcSKIRIA+9VtY4Q75lAvc
+gjFtdzwiCI0Sk089HnxIU3DB3YToLymbI3tCFe7L6ClV3Buw31FZedcFj0D0m1K3
+7EG7F4Oz8+R2g8fg7dovYcHRNTNM+EdnbcEJLMxcgiol/ERfaD14q08+REywt39f
+SVGqS62OZeWcpc0Iny4TzJRy5a09J+ypKIR+8Gl9ysni2tDNiCJd82ngLLjfhKxV
+XnAHZSSL19NHLYdjjvxsctPbCCpWoItqu2Q4pNrDLxvHQL+4DWbF6fhTo/11woji
+PX+g+aLKSXSvQeVAgAY5hnPFuTnb+mS3suVIu+pbiO3IiEzinwKBJG8jIjR4kcS3
+TO2gHos3XMB1OG5uiZZoiZPFMslfOX995rwkn7qOw/g0GIiuudP8cEXUxQSKot/C
+MDBOtoE+2ofwDpoj7IY+xY8CFO2Vh7JMq/aqRMUDhCiV6C2SaNa3+jEXPALCcs+s
+3X0IU9QU84aIDX67uMY1TOn0q40VBd+JrLlnM/xyqPVt+dAMNsuRefZM4j/puxur
+gWK6phJb/9d1WpoNmWdW9/CjM+UrN0pH3AxKzs8/w/tIUZTmgiHlrbNRts1nELwZ
+5/sxgPrEyLtMQBTgkQIDAQABo0YwRDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/
+BAwwCgYIKwYBBQUHAwMwGgYDVR0RBBMwEYEPaW5mb0BidWlsZDIub3JnMA0GCSqG
+SIb3DQEBCwUAA4ICAQC1pO/IXnaPObbwj23O43xUQULTvpTw9q8XlmVdEiPmd2Lc
+g+7r8aUOgpkse602exR+Z9Ly0I7IbeRGpQ+5dK1AVVTKVnZCub44AEIRnlryEHP2
+TSLtlakZx9Adtir1njccy2ZzkepebhYNB3aVNFlKP2y/uYjZM6ElXH99vbdUbgus
+ILzS2KHW2xGbcm36whNPz5e0mitDC5bxJ8lK8jFKrBEVhwNrvzCzYf+b96Un6yyg
+e2QO/yIGjGzsQRH2izGjNDNoCuezVq1KnV/LCjZ0LwJDyP9cd29zyTf+KKLxw6q7
+OltIhTgvi27+FnyjF7n94oldfPwwWmrtUy2ScHgRHsCGfQ19f4Mk9QSJ1kgz6JAu
+jOuUGBSVwNubpY7hMM2/IOdjfmCX+W3HzO/Tiluj3OqJV9F3GTyMh7BIQ3ea65KO
+Yz2yuL1L1KKc75k9cxs5DB6zsJy8hqfecPqjDgQ6sFvQHppaopLKvxuwONOgr6Ks
+NvFTH+DQgAqPmrtjyBiP4S64JOLAmA8QwYmpx6AoauSnqUeuzGnr/93A4fzXphX8
+/X3fwanqk7mxCacJrALO+EEN69IiOcaS5mhDim3MnDK9vyJkR/muOY5aU3IL541+
+oL4cwNygep6WvVd505iQbGeGTPe3wc5KcYUUTaS9PmMps/xVD4JO5rEhys9kjQ==
-----END CERTIFICATE-----
diff --git a/tests/auth/mismatch-cert.pem b/tests/auth/mismatch-cert.pem
index b87809a..d9331fe 100644
--- a/tests/auth/mismatch-cert.pem
+++ b/tests/auth/mismatch-cert.pem
@@ -1,31 +1,31 @@
-----BEGIN CERTIFICATE-----
-MIIFXDCCA0SgAwIBAgIUcgX+v3Y9V2MwKIUZ4ethyInKIT8wDQYJKoZIhvcNAQEL
+MIIFWjCCA0KgAwIBAgIUP6yp0fTgGPzaaZgBolF2+qQONT4wDQYJKoZIhvcNAQEL
BQAwQzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxKDAmBgNVBAMMH25hbWU6YnVp
-bGQyLm9yZy9taXNtYXRjaGVkL25hbWUwIBcNMjEwMzA4MDgzMjUwWhgPMjEyMTAy
-MTIwODMyNTBaMEMxFzAVBgNVBAoMDkNvZGUgU3ludGhlc2lzMSgwJgYDVQQDDB9u
-YW1lOmJ1aWxkMi5vcmcvbWlzbWF0Y2hlZC9uYW1lMIICIjANBgkqhkiG9w0BAQEF
-AAOCAg8AMIICCgKCAgEA2rvxJe7MXMIygVTVGfMR4B/MTRRRgbKdLcXaUbu+Ifdy
-jGMu34GsUG51WzhcNOmNdLX2FHEiiESAPvVbWOEO+ZQL3IIxbXc8IgiNEpNPPR58
-SFNwwd2E6C8pmyN7QhXuy+gpVdwbsN9RWXnXBY9A9JtSt+xBuxeDs/PkdoPH4O3a
-L2HB0TUzTPhHZ23BCSzMXIIqJfxEX2g9eKtPPkRMsLd/X0lRqkutjmXlnKXNCJ8u
-E8yUcuWtPSfsqSiEfvBpfcrJ4trQzYgiXfNp4Cy434SsVV5wB2Uki9fTRy2HY478
-bHLT2wgqVqCLartkOKTawy8bx0C/uA1mxen4U6P9dcKI4j1/oPmiykl0r0HlQIAG
-OYZzxbk52/pkt7LlSLvqW4jtyIhM4p8CgSRvIyI0eJHEt0ztoB6LN1zAdThubomW
-aImTxTLJXzl/fea8JJ+6jsP4NBiIrrnT/HBF1MUEiqLfwjAwTraBPtqH8A6aI+yG
-PsWPAhTtlYeyTKv2qkTFA4QolegtkmjWt/oxFzwCwnLPrN19CFPUFPOGiA1+u7jG
-NUzp9KuNFQXfiay5ZzP8cqj1bfnQDDbLkXn2TOI/6bsbq4FiuqYSW//XdVqaDZln
-VvfwozPlKzdKR9wMSs7PP8P7SFGU5oIh5a2zUbbNZxC8Gef7MYD6xMi7TEAU4JEC
-AwEAAaNGMEQwDgYDVR0PAQH/BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMD
-MBoGA1UdEQQTMBGBD2luZm9AYnVpbGQyLm9yZzANBgkqhkiG9w0BAQsFAAOCAgEA
-DAyRCqrh4bNGBzB2V+0DuXq/1L+7a8X5s+5R+WLSD8c5sDPUDP1QjvZEHCdVumnX
-QEO2PbqKKUYRH2lDam0tlYQHkWC1I4320DxwW+U9pCA7r6830haytSYa0Lt8mEwX
-xrvphbYSiM7AwuD7ngPGi0N2mM38E/kR+H7bxlNiu8F71OzFNubOt5UKF036h/Qx
-HkOgZPxIh5DsqdTWL58ouPV6ZBuJA25JjOnLKFqc1KLqlSfntvTSiwLC/dAzHLwf
-l0BsPIarLBlf7tMtkDm6Fx2huLQ+e+TJC0/l45zJ2S/j352omm5OKmW6OXv2YlLP
-4PZXLEc3nquvxAWr1KbjdPFe6+K0K2+YnNq280RZsgyOKTGRRYODC2Mo4XYshNHX
-1cpo6RN7DqVzCxT+hZ2SwnHRuJ/mbaUYN2+m1HYSVyU6fVjJl3EjRXdOlGQl5OU6
-x/OBK4TcmGyfP7mBJRx0bisg/tK2m9PwJjaCzYt9doQNFQKHKjpantuv9bRAULX1
-8z7qJrVe6t5z2DjWONAZye0uBzTJ9+2R+YYF6eC9WQ1/VXnay3Ygxfysn+QAVj93
-sS02Dhas4Ib6O1Qgn7ZMX9kwvBbG1eXT3+TSWvWia170XppNLqWDrwJ7z1H3Eijm
-VAZLHBkVLIuFM3C46uSCU/1t2uMseE9WgXw3FSkJ9c0=
+bGQyLm9yZy9taXNtYXRjaGVkL25hbWUwHhcNMjIwMTI4MTUzOTMzWhcNMzcwMTI0
+MTUzOTMzWjBDMRcwFQYDVQQKDA5Db2RlIFN5bnRoZXNpczEoMCYGA1UEAwwfbmFt
+ZTpidWlsZDIub3JnL21pc21hdGNoZWQvbmFtZTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBANq78SXuzFzCMoFU1RnzEeAfzE0UUYGynS3F2lG7viH3coxj
+Lt+BrFBudVs4XDTpjXS19hRxIohEgD71W1jhDvmUC9yCMW13PCIIjRKTTz0efEhT
+cMHdhOgvKZsje0IV7svoKVXcG7DfUVl51wWPQPSbUrfsQbsXg7Pz5HaDx+Dt2i9h
+wdE1M0z4R2dtwQkszFyCKiX8RF9oPXirTz5ETLC3f19JUapLrY5l5ZylzQifLhPM
+lHLlrT0n7KkohH7waX3KyeLa0M2IIl3zaeAsuN+ErFVecAdlJIvX00cth2OO/Gxy
+09sIKlagi2q7ZDik2sMvG8dAv7gNZsXp+FOj/XXCiOI9f6D5ospJdK9B5UCABjmG
+c8W5Odv6ZLey5Ui76luI7ciITOKfAoEkbyMiNHiRxLdM7aAeizdcwHU4bm6JlmiJ
+k8UyyV85f33mvCSfuo7D+DQYiK650/xwRdTFBIqi38IwME62gT7ah/AOmiPshj7F
+jwIU7ZWHskyr9qpExQOEKJXoLZJo1rf6MRc8AsJyz6zdfQhT1BTzhogNfru4xjVM
+6fSrjRUF34msuWcz/HKo9W350Aw2y5F59kziP+m7G6uBYrqmElv/13Vamg2ZZ1b3
+8KMz5Ss3SkfcDErOzz/D+0hRlOaCIeWts1G2zWcQvBnn+zGA+sTIu0xAFOCRAgMB
+AAGjRjBEMA4GA1UdDwEB/wQEAwIHgDAWBgNVHSUBAf8EDDAKBggrBgEFBQcDAzAa
+BgNVHREEEzARgQ9pbmZvQGJ1aWxkMi5vcmcwDQYJKoZIhvcNAQELBQADggIBAMkT
+IFZZE7anQDBgfYXTICeb9xnIHHiGalio7UT2dPGvIlyy0B9uWoCF5WY3P2YNmOI3
+JWH0SaD55hykrIUWZeQpvbB0llsOstFU2CLYe5uHZEmkq76Cc73WiT2NsEPQpEmM
+TDdTnwRIAxXFh57XjLEqqQJ81bRiakUwlzCawtAS4FMOKxkZVSCLiDHfAg1X3LKr
+krFhSqk/cVAtMFDjOhncHK4s7mvRcUQSBSjvq0QiRJISB1+LwerA/In0FV8H1XlI
+zbNXW1MSHjqyIHi2xSHoYxMetJL/3cVr77C7fw2IdYZ9/eE9hw7uSq+s/Kxt78BS
+Vz87beTk8mnPzFDGDlYN58VMamtiaCtNMb7yrQ6USTX52cwscHO7rvCjkVAAM/Om
+hVd1zBEpSkrhDkXgaBlxHb8gTdH9uejvTQzEr2CgBMbKo4yDDglRGAIETrB3DZm/
+8SqzKeiD0/iacTag/pjaDakArldiZklNEMaHTGhsKGP5f+fhiPzgAyq3mk5Oihsi
+P9ZSAikwASPxBiN7YR8Y7/R9lwfqga+mpqlbz3zeYrWtNymwlyoM8oloEhhbZFXL
+Ozrt9ES7ss1ozsv9HsT9Tzb0Zn7IHIan2dHFHUtHlAKbGWNITegr6iyTAQEL5ZKk
+4ikmdLViBHWjDr+n7NLzTwgXRQi2JObcH3yQvOnH
-----END CERTIFICATE-----
diff --git a/tests/auth/noemail-cert.pem b/tests/auth/noemail-cert.pem
index 376cc74..be3d84a 100644
--- a/tests/auth/noemail-cert.pem
+++ b/tests/auth/noemail-cert.pem
@@ -1,30 +1,30 @@
-----BEGIN CERTIFICATE-----
-MIIFIDCCAwigAwIBAgIUTe6UFlzESo1vXMm299tqkK/MymgwDQYJKoZIhvcNAQEL
+MIIFHjCCAwagAwIBAgIUf5Yh4hKU0uE1Rpytar0kAafQPn8wDQYJKoZIhvcNAQEL
BQAwMzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxGDAWBgNVBAMMD25hbWU6YnVp
-bGQyLm9yZzAgFw0yMTAzMDgwODMyNTBaGA8yMTIxMDIxMjA4MzI1MFowMzEXMBUG
-A1UECgwOQ29kZSBTeW50aGVzaXMxGDAWBgNVBAMMD25hbWU6YnVpbGQyLm9yZzCC
-AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANq78SXuzFzCMoFU1RnzEeAf
-zE0UUYGynS3F2lG7viH3coxjLt+BrFBudVs4XDTpjXS19hRxIohEgD71W1jhDvmU
-C9yCMW13PCIIjRKTTz0efEhTcMHdhOgvKZsje0IV7svoKVXcG7DfUVl51wWPQPSb
-UrfsQbsXg7Pz5HaDx+Dt2i9hwdE1M0z4R2dtwQkszFyCKiX8RF9oPXirTz5ETLC3
-f19JUapLrY5l5ZylzQifLhPMlHLlrT0n7KkohH7waX3KyeLa0M2IIl3zaeAsuN+E
-rFVecAdlJIvX00cth2OO/Gxy09sIKlagi2q7ZDik2sMvG8dAv7gNZsXp+FOj/XXC
-iOI9f6D5ospJdK9B5UCABjmGc8W5Odv6ZLey5Ui76luI7ciITOKfAoEkbyMiNHiR
-xLdM7aAeizdcwHU4bm6JlmiJk8UyyV85f33mvCSfuo7D+DQYiK650/xwRdTFBIqi
-38IwME62gT7ah/AOmiPshj7FjwIU7ZWHskyr9qpExQOEKJXoLZJo1rf6MRc8AsJy
-z6zdfQhT1BTzhogNfru4xjVM6fSrjRUF34msuWcz/HKo9W350Aw2y5F59kziP+m7
-G6uBYrqmElv/13Vamg2ZZ1b38KMz5Ss3SkfcDErOzz/D+0hRlOaCIeWts1G2zWcQ
-vBnn+zGA+sTIu0xAFOCRAgMBAAGjKjAoMA4GA1UdDwEB/wQEAwIHgDAWBgNVHSUB
-Af8EDDAKBggrBgEFBQcDAzANBgkqhkiG9w0BAQsFAAOCAgEAyjufknz7r1OM9Udr
-7ljN8xqqterRt2yYEalgUgQ6z3Q0P6/mzh1ZD+INjVmYOWi432wcxm7xYPVJ+onX
-ECqErruMn2Yib0wxIGWLenTeBOIpj0n7UbnDqrY9iuI7Mp5+OarrHyn67vdu4IVQ
-qj+tqcg1eSXhPUjTTuZgsqFjazKHmO6JN+MYi3tX5gI+Kw3un/uayJr/Uwq4xcnL
-hHJMfowm+psYQxNh0pvtc8D7mZIlRVJmHjReVW2j5ziQd09CSi/mt1NnZW4ADidj
-PiPQXA2q6MTzZiEQEdU7Ee5OaYrky5dhmcB5IYRGFy7fOPGrJEi+jFUIqTU8svkF
-NiItN1t9GDu/t7IlFc3s/0XYD9ePuEF4sRgj2JOLc2+R53nT5yuXFj/cZCMvCrmK
-e2LeLYfJOh9u4pggBn7QiSQ2C7Jo20fNB8+qeKoLL8LrES2/uNney0br9+GrOfnh
-y0w5i2jOg51+bSpCTsgiVJ7vhY3oQHWcNTPt/MMYnEh/GxJZhhnUV/QLCFbsUxlv
-FJJTOda3CIkzBtjsFfGS3n0SaRppIKTeTk5CUGSGmiaew15UM/xk/rZAISacXk5h
-J4+MqHN352v+jmNUNj68mU9FEWQl6XF7qeSbgAemCDMTHlcEPXBN6qz5YA8QX3Je
-RWl3nROcbf1T8uu0M5B+oYGLaP8=
+bGQyLm9yZzAeFw0yMjAxMjgxNTM5MzNaFw0zNzAxMjQxNTM5MzNaMDMxFzAVBgNV
+BAoMDkNvZGUgU3ludGhlc2lzMRgwFgYDVQQDDA9uYW1lOmJ1aWxkMi5vcmcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDau/El7sxcwjKBVNUZ8xHgH8xN
+FFGBsp0txdpRu74h93KMYy7fgaxQbnVbOFw06Y10tfYUcSKIRIA+9VtY4Q75lAvc
+gjFtdzwiCI0Sk089HnxIU3DB3YToLymbI3tCFe7L6ClV3Buw31FZedcFj0D0m1K3
+7EG7F4Oz8+R2g8fg7dovYcHRNTNM+EdnbcEJLMxcgiol/ERfaD14q08+REywt39f
+SVGqS62OZeWcpc0Iny4TzJRy5a09J+ypKIR+8Gl9ysni2tDNiCJd82ngLLjfhKxV
+XnAHZSSL19NHLYdjjvxsctPbCCpWoItqu2Q4pNrDLxvHQL+4DWbF6fhTo/11woji
+PX+g+aLKSXSvQeVAgAY5hnPFuTnb+mS3suVIu+pbiO3IiEzinwKBJG8jIjR4kcS3
+TO2gHos3XMB1OG5uiZZoiZPFMslfOX995rwkn7qOw/g0GIiuudP8cEXUxQSKot/C
+MDBOtoE+2ofwDpoj7IY+xY8CFO2Vh7JMq/aqRMUDhCiV6C2SaNa3+jEXPALCcs+s
+3X0IU9QU84aIDX67uMY1TOn0q40VBd+JrLlnM/xyqPVt+dAMNsuRefZM4j/puxur
+gWK6phJb/9d1WpoNmWdW9/CjM+UrN0pH3AxKzs8/w/tIUZTmgiHlrbNRts1nELwZ
+5/sxgPrEyLtMQBTgkQIDAQABoyowKDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/
+BAwwCgYIKwYBBQUHAwMwDQYJKoZIhvcNAQELBQADggIBACfm2EzsSJSOfvA39Ad6
+1Y/uPoRFU3fNSXwFJ+K33+CGiblhtOaJmKbY1OTRwqW7P7L66U3/Bw+k1bOdxwOv
+WerGoj44xFqLUPptF70Vg2xJqHe7ax+ONHPXQsi4kEdJXfwdTc+v7Nom5xW3fs8a
+jp6dRqFua/l75QroQesGdOfFVZS5DecJzpv3JY8Z0fVde3B82HUmvUDoB+PoCHOG
+WnIZWF9avlitwSammrGytkPLqhhVh36TFV7bnKsqjU7RB5VDCuocmpTImFmBwQia
+L/VnSg1PTZtbxSFQRKXPOSAsXb2s3TLbWMYNJeucXcYigCzaZpX8Um+2Pmf+gwaL
+qcxdek6xV/X7y64FN4K60p7rKkrohkZ54B5HpxbEQhSxbaXqTfHFVwjJHmBNLh8d
+HRdbRVu2pCCUUcy3ZNwJA//Ogd65L/JvIm9Wb9q6J5BdEcl2FiFb1KCMwueXYvMJ
+wyseyevzk7WZxmvKbBpD7Sv0z96+79pWzemQiTjkDCTyBdGg298juewEdKRarz5G
+eWPPOnZ17lLHZtd9QAaxgvhzoa08iR2km00yHljkoSV6NlvNEAcwxVh6llqZvxTe
+mphobrj4JgepLKF7qaguWusjWNoKnRa4BV4cu2FyIbZ4ujSnupt5vk4RBdnTDsVk
+wglQdSTV7HMNdOzBTZx+UDhZ
-----END CERTIFICATE-----
diff --git a/tests/auth/self-any-cert.pem b/tests/auth/self-any-cert.pem
index 3aa96de..a929ffc 100644
--- a/tests/auth/self-any-cert.pem
+++ b/tests/auth/self-any-cert.pem
@@ -1,31 +1,31 @@
-----BEGIN CERTIFICATE-----
-MIIFQDCCAyigAwIBAgIUZuaUzSXBKduFv6X4Tb5+I0xc5q4wDQYJKoZIhvcNAQEL
+MIIFPjCCAyagAwIBAgIUZEIRK/HIwcJOhGVkifwFEO1dgpIwDQYJKoZIhvcNAQEL
BQAwNTEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxGjAYBgNVBAMMEW5hbWU6Kipi
-dWlsZDIub3JnMCAXDTIxMDMwODA4MzI1MFoYDzIxMjEwMjEyMDgzMjUwWjA1MRcw
-FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEaMBgGA1UEAwwRbmFtZToqKmJ1aWxkMi5v
-cmcwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDau/El7sxcwjKBVNUZ
-8xHgH8xNFFGBsp0txdpRu74h93KMYy7fgaxQbnVbOFw06Y10tfYUcSKIRIA+9VtY
-4Q75lAvcgjFtdzwiCI0Sk089HnxIU3DB3YToLymbI3tCFe7L6ClV3Buw31FZedcF
-j0D0m1K37EG7F4Oz8+R2g8fg7dovYcHRNTNM+EdnbcEJLMxcgiol/ERfaD14q08+
-REywt39fSVGqS62OZeWcpc0Iny4TzJRy5a09J+ypKIR+8Gl9ysni2tDNiCJd82ng
-LLjfhKxVXnAHZSSL19NHLYdjjvxsctPbCCpWoItqu2Q4pNrDLxvHQL+4DWbF6fhT
-o/11wojiPX+g+aLKSXSvQeVAgAY5hnPFuTnb+mS3suVIu+pbiO3IiEzinwKBJG8j
-IjR4kcS3TO2gHos3XMB1OG5uiZZoiZPFMslfOX995rwkn7qOw/g0GIiuudP8cEXU
-xQSKot/CMDBOtoE+2ofwDpoj7IY+xY8CFO2Vh7JMq/aqRMUDhCiV6C2SaNa3+jEX
-PALCcs+s3X0IU9QU84aIDX67uMY1TOn0q40VBd+JrLlnM/xyqPVt+dAMNsuRefZM
-4j/puxurgWK6phJb/9d1WpoNmWdW9/CjM+UrN0pH3AxKzs8/w/tIUZTmgiHlrbNR
-ts1nELwZ5/sxgPrEyLtMQBTgkQIDAQABo0YwRDAOBgNVHQ8BAf8EBAMCB4AwFgYD
-VR0lAQH/BAwwCgYIKwYBBQUHAwMwGgYDVR0RBBMwEYEPaW5mb0BidWlsZDIub3Jn
-MA0GCSqGSIb3DQEBCwUAA4ICAQAUEuQSylqB+obXenWK0WmH6vyMtEEMo2TupFSf
-5anBi+szp67t3hOTQw1OoxegHj2HvkjgnGg1GqhlKKrRFg60ohYsw6d2VOAk81V8
-lbYMnRiFa1JcLs62TUgpSrnzLVRGt6iK1pgxAR9N2rRgb91WXLNEbWzoXpAQCAWD
-0ktw1xcTl5htKYviz57ggjyfWKbFECHjh7zlYxjI84OwYwS5QU9tnnkHDlT/d/KW
-zaU67YPpeldAefCh7y4lJY7P8468ZD7sI56ZxTyNwZVHxXCxBZuuR36DWuyniJ1o
-aOy2OxAj7h9WNTRWZk7IPunYoLgdFGy48PH1KyGVWNfPP16+59BNbUmqeAQpMm/L
-w/YIslEpdySWX/YM/SbRLc2CT+8DuOQ/y0kZNdzhVzfTsYX6nTAJqsAuh6PnFKBM
-2HFamizAdBVMUCsgyw0xeKfPDr2NFkmP9FteZTH4b1snqZTo4qqRsoDSUHbwzJmi
-1Kq7urzpv2smKb1a4O7+78G3efIGjtWMv/mKiqMIGyX8OXUNrnb2ZPOZjp3Hm+hZ
-Li9LlWyrol+x7ib8oIF6WIx1tyGjriLZLW5uddv4zS/usIUpkaGZRQvesrLVfPI4
-BRPWwRiA+CEQJ6ptagpJptkzbj9DzMmbp/ZnEGoyY0j83CnPdm5m4zKONFz28zPW
-hqX3Zg==
+dWlsZDIub3JnMB4XDTIyMDEyODE1MzkzM1oXDTM3MDEyNDE1MzkzM1owNTEXMBUG
+A1UECgwOQ29kZSBTeW50aGVzaXMxGjAYBgNVBAMMEW5hbWU6KipidWlsZDIub3Jn
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rvxJe7MXMIygVTVGfMR
+4B/MTRRRgbKdLcXaUbu+IfdyjGMu34GsUG51WzhcNOmNdLX2FHEiiESAPvVbWOEO
++ZQL3IIxbXc8IgiNEpNPPR58SFNwwd2E6C8pmyN7QhXuy+gpVdwbsN9RWXnXBY9A
+9JtSt+xBuxeDs/PkdoPH4O3aL2HB0TUzTPhHZ23BCSzMXIIqJfxEX2g9eKtPPkRM
+sLd/X0lRqkutjmXlnKXNCJ8uE8yUcuWtPSfsqSiEfvBpfcrJ4trQzYgiXfNp4Cy4
+34SsVV5wB2Uki9fTRy2HY478bHLT2wgqVqCLartkOKTawy8bx0C/uA1mxen4U6P9
+dcKI4j1/oPmiykl0r0HlQIAGOYZzxbk52/pkt7LlSLvqW4jtyIhM4p8CgSRvIyI0
+eJHEt0ztoB6LN1zAdThubomWaImTxTLJXzl/fea8JJ+6jsP4NBiIrrnT/HBF1MUE
+iqLfwjAwTraBPtqH8A6aI+yGPsWPAhTtlYeyTKv2qkTFA4QolegtkmjWt/oxFzwC
+wnLPrN19CFPUFPOGiA1+u7jGNUzp9KuNFQXfiay5ZzP8cqj1bfnQDDbLkXn2TOI/
+6bsbq4FiuqYSW//XdVqaDZlnVvfwozPlKzdKR9wMSs7PP8P7SFGU5oIh5a2zUbbN
+ZxC8Gef7MYD6xMi7TEAU4JECAwEAAaNGMEQwDgYDVR0PAQH/BAQDAgeAMBYGA1Ud
+JQEB/wQMMAoGCCsGAQUFBwMDMBoGA1UdEQQTMBGBD2luZm9AYnVpbGQyLm9yZzAN
+BgkqhkiG9w0BAQsFAAOCAgEAOpGR4TH4IUloFaTtqeIqZf3rjrT1k40KJr6iWwwR
+hNtNlKeLLTMAvWFMnrw/1TnzCJIMhIbpEyqP4fIFRLYURBwZzUBrCijeM9MI9Rl1
+9ckA9RxAfanwofQrJ0hBVyQaRoMXhDRN0ut0OwojK+MGf3HLSgCGQtjd6KOTNzsF
+tOQ8OQ4ruaKZU9gMGC27sTSYDMNtXfAEFhNd1BICIqGhD0S3is74HLoYW180UX/W
+/45V/VrF+fsj5Ks8RYagr+1wZDglsREfuqF8l9S4I9nO2NfhwbMRRTAiRsJU5PfC
+uh3zpHNhy0O4SE8zBQFehRfaOk6GH23dmVUA8lIoDzRyHgjIfciwERDKSD4FWBt/
+Tb05vqsw4vnYzWlp/oQWN1hscSlRlWQQYOEJBOsTI6LoCNFuuIJzTxF407zrQvD0
+vUVvec6lhbZhOhKwlBIfF6qKGxQw1ExIo+nbue43ulJVbAXu70vUM+y5e4q7XzZs
+nQT7Wlu1OMXctwZXkBKvIaXqFMZ628qxODdFzBo2Hpugm2U9mfXXCHqUJnsSRBEK
+Y+GPAB44iogRli0eDM0XrMtrgOWvP0noCuo7ODnL+6aSxW4C/JrAsgodTBAitPG5
+vuE8p1geY+rhI72ZkC/CZK96oJ0N81Mido5gw53AqQRvE0RMQVKBihccQfYmDRpI
+38M=
-----END CERTIFICATE-----
diff --git a/tests/auth/self-cert.pem b/tests/auth/self-cert.pem
index 824d46d..9e4f6d9 100644
--- a/tests/auth/self-cert.pem
+++ b/tests/auth/self-cert.pem
@@ -1,31 +1,30 @@
-----BEGIN CERTIFICATE-----
-MIIFPjCCAyagAwIBAgIUXlJkVTYynE5nQeou8siCgpjHJx4wDQYJKoZIhvcNAQEL
+MIIFPDCCAySgAwIBAgIUGW0SVQYLDCPh84fTIuxSBKjv4iIwDQYJKoZIhvcNAQEL
BQAwNDEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxGTAXBgNVBAMMEG5hbWU6KmJ1
-aWxkMi5vcmcwIBcNMjEwMzA4MDgzMjUwWhgPMjEyMTAyMTIwODMyNTBaMDQxFzAV
-BgNVBAoMDkNvZGUgU3ludGhlc2lzMRkwFwYDVQQDDBBuYW1lOipidWlsZDIub3Jn
-MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rvxJe7MXMIygVTVGfMR
-4B/MTRRRgbKdLcXaUbu+IfdyjGMu34GsUG51WzhcNOmNdLX2FHEiiESAPvVbWOEO
-+ZQL3IIxbXc8IgiNEpNPPR58SFNwwd2E6C8pmyN7QhXuy+gpVdwbsN9RWXnXBY9A
-9JtSt+xBuxeDs/PkdoPH4O3aL2HB0TUzTPhHZ23BCSzMXIIqJfxEX2g9eKtPPkRM
-sLd/X0lRqkutjmXlnKXNCJ8uE8yUcuWtPSfsqSiEfvBpfcrJ4trQzYgiXfNp4Cy4
-34SsVV5wB2Uki9fTRy2HY478bHLT2wgqVqCLartkOKTawy8bx0C/uA1mxen4U6P9
-dcKI4j1/oPmiykl0r0HlQIAGOYZzxbk52/pkt7LlSLvqW4jtyIhM4p8CgSRvIyI0
-eJHEt0ztoB6LN1zAdThubomWaImTxTLJXzl/fea8JJ+6jsP4NBiIrrnT/HBF1MUE
-iqLfwjAwTraBPtqH8A6aI+yGPsWPAhTtlYeyTKv2qkTFA4QolegtkmjWt/oxFzwC
-wnLPrN19CFPUFPOGiA1+u7jGNUzp9KuNFQXfiay5ZzP8cqj1bfnQDDbLkXn2TOI/
-6bsbq4FiuqYSW//XdVqaDZlnVvfwozPlKzdKR9wMSs7PP8P7SFGU5oIh5a2zUbbN
-ZxC8Gef7MYD6xMi7TEAU4JECAwEAAaNGMEQwDgYDVR0PAQH/BAQDAgeAMBYGA1Ud
-JQEB/wQMMAoGCCsGAQUFBwMDMBoGA1UdEQQTMBGBD2luZm9AYnVpbGQyLm9yZzAN
-BgkqhkiG9w0BAQsFAAOCAgEAZNo0nEWuBA3fAFy3eRql5sULULBCF+eq2vQ7GCK8
-N7kG88gcz61Rba/evi2IyGNz+TbTPYmND8LRSl+R1F81ueH0sZqbhZp5jXL58H79
-Kll4gNNm2IM7qJeEhajlNdYofZx8aFtFDDHV9rw8WAZLqkib0qNfyDbIVBMsXJQk
-PL0f4oxrEiesIRJ4bLDAPdcA2N6vfO88jCYniy9mVITszaBResfzG21oiy1B5RDv
-dXkdPuwe54T2Fb6XLKv5ILXywrENaSp1BvlvZsLWcccekTKeIf+3RQMyhY4+Ixot
-IU6+uJ4TcVJObkj9sG8RvktsmnGoZc4uHmZ042SmC1kPiy7Ti8wnwvy2RoKlrbJa
-rne9jfCiDPM8qH5yLiErv4XOS1BEleEH2HkeW0BeNEGM2n2Wrz8+6Mcp/Ixejwiz
-ZTBkxfJjhrOOlH+ixas/hbzBCWP+FvJc4k7LRCZTfuO8GMGoMKWNWDL1h8U1hRcp
-5MUf3pl29IYGTSzTmgqYq/NVITfbKCv/P8+ut2HUkTYs6/mLaptlVVwPwN/SdW5v
-FI6hUcp2/b8d5sM1WqtEJKng5an5QI8845HJnbHcgglfDx7vxg0MoGzAqb93ZVtd
-P9xzmxbqHMsxbG503vC24E6PeJhbyH1vQNK9onh8fQlFiUZA4ZycNLzliJB6uqIH
-B/M=
+aWxkMi5vcmcwHhcNMjIwMTI4MTUzOTMzWhcNMzcwMTI0MTUzOTMzWjA0MRcwFQYD
+VQQKDA5Db2RlIFN5bnRoZXNpczEZMBcGA1UEAwwQbmFtZToqYnVpbGQyLm9yZzCC
+AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANq78SXuzFzCMoFU1RnzEeAf
+zE0UUYGynS3F2lG7viH3coxjLt+BrFBudVs4XDTpjXS19hRxIohEgD71W1jhDvmU
+C9yCMW13PCIIjRKTTz0efEhTcMHdhOgvKZsje0IV7svoKVXcG7DfUVl51wWPQPSb
+UrfsQbsXg7Pz5HaDx+Dt2i9hwdE1M0z4R2dtwQkszFyCKiX8RF9oPXirTz5ETLC3
+f19JUapLrY5l5ZylzQifLhPMlHLlrT0n7KkohH7waX3KyeLa0M2IIl3zaeAsuN+E
+rFVecAdlJIvX00cth2OO/Gxy09sIKlagi2q7ZDik2sMvG8dAv7gNZsXp+FOj/XXC
+iOI9f6D5ospJdK9B5UCABjmGc8W5Odv6ZLey5Ui76luI7ciITOKfAoEkbyMiNHiR
+xLdM7aAeizdcwHU4bm6JlmiJk8UyyV85f33mvCSfuo7D+DQYiK650/xwRdTFBIqi
+38IwME62gT7ah/AOmiPshj7FjwIU7ZWHskyr9qpExQOEKJXoLZJo1rf6MRc8AsJy
+z6zdfQhT1BTzhogNfru4xjVM6fSrjRUF34msuWcz/HKo9W350Aw2y5F59kziP+m7
+G6uBYrqmElv/13Vamg2ZZ1b38KMz5Ss3SkfcDErOzz/D+0hRlOaCIeWts1G2zWcQ
+vBnn+zGA+sTIu0xAFOCRAgMBAAGjRjBEMA4GA1UdDwEB/wQEAwIHgDAWBgNVHSUB
+Af8EDDAKBggrBgEFBQcDAzAaBgNVHREEEzARgQ9pbmZvQGJ1aWxkMi5vcmcwDQYJ
+KoZIhvcNAQELBQADggIBAKkl/nV34sQYHhZk9WobqBAuuxuXHp1E4pPRu2YzjaIN
+sJCKd4/EVR76iiQsfv2C3gkOdNNGif5lqCpdjOEBs6QTD/fvNi/0LYWmgsYywMwC
+yxDHXPiDlSXRwbUKaco+7+TP0PUs/uZdpXBiVese+zSePWQUAWbMcM73CZiKykXx
+Ov1hC08hnjgZvyut6YzQTjbVrjgStbXzxtBTjRfOLf3s6Td0c+UBbc4gzdifSug7
+FT7qVqQc19VX0npyCHKZYFDyduCJHIs2edL98abFhTI3pbW5Rcdu9YLf0A+2PMkb
+kqbTOihI0JG6T4lu9tWFKrS/tsn/EeJ/ytWrYGtiH/Nytt8MqgHyLnj+drHOpJE3
+ZtB7ym94Dh5V1GyoBdk5o3XZkiztjCBqdMvb2DJrTsIy3h97SaSpyJ9n+MHmP98+
+lIUAmv33rsE+TsUa1IkAT4wIzllbE0Fof+71/2mdVpOkTbOhZMihuRtt5XwuyfEN
+GGAOga7B8+OSQGR0Pza6UuLGkry0wI7QXZjEFsL7g2w1YDSBd3LI99WGvvsb4zwn
+PkRFiOyEVW42z16sSNVwCxmlI2IuR36xRMCyHpV6Y/byeBm+2T4V0BMB3VTVSh/+
+X6hMWTbjPS14p3f9ZQGMrU0hMYEf4c+CmOVhP86bXetSsmL2qT2Dra/7LHhGjq5W
-----END CERTIFICATE-----
diff --git a/tests/auth/subdomain-cert.pem b/tests/auth/subdomain-cert.pem
index 235b814..6dd5349 100644
--- a/tests/auth/subdomain-cert.pem
+++ b/tests/auth/subdomain-cert.pem
@@ -1,31 +1,31 @@
-----BEGIN CERTIFICATE-----
-MIIFQDCCAyigAwIBAgIUNzGw0T196IJKqGZ9PXgKEXdaUzAwDQYJKoZIhvcNAQEL
+MIIFPjCCAyagAwIBAgIUSedvgx6XcCct0TFHTVEsP6CnXx0wDQYJKoZIhvcNAQEL
BQAwNTEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxGjAYBgNVBAMMEW5hbWU6Ki5i
-dWlsZDIub3JnMCAXDTIxMDMwODA4MzI1MFoYDzIxMjEwMjEyMDgzMjUwWjA1MRcw
-FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEaMBgGA1UEAwwRbmFtZToqLmJ1aWxkMi5v
-cmcwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDau/El7sxcwjKBVNUZ
-8xHgH8xNFFGBsp0txdpRu74h93KMYy7fgaxQbnVbOFw06Y10tfYUcSKIRIA+9VtY
-4Q75lAvcgjFtdzwiCI0Sk089HnxIU3DB3YToLymbI3tCFe7L6ClV3Buw31FZedcF
-j0D0m1K37EG7F4Oz8+R2g8fg7dovYcHRNTNM+EdnbcEJLMxcgiol/ERfaD14q08+
-REywt39fSVGqS62OZeWcpc0Iny4TzJRy5a09J+ypKIR+8Gl9ysni2tDNiCJd82ng
-LLjfhKxVXnAHZSSL19NHLYdjjvxsctPbCCpWoItqu2Q4pNrDLxvHQL+4DWbF6fhT
-o/11wojiPX+g+aLKSXSvQeVAgAY5hnPFuTnb+mS3suVIu+pbiO3IiEzinwKBJG8j
-IjR4kcS3TO2gHos3XMB1OG5uiZZoiZPFMslfOX995rwkn7qOw/g0GIiuudP8cEXU
-xQSKot/CMDBOtoE+2ofwDpoj7IY+xY8CFO2Vh7JMq/aqRMUDhCiV6C2SaNa3+jEX
-PALCcs+s3X0IU9QU84aIDX67uMY1TOn0q40VBd+JrLlnM/xyqPVt+dAMNsuRefZM
-4j/puxurgWK6phJb/9d1WpoNmWdW9/CjM+UrN0pH3AxKzs8/w/tIUZTmgiHlrbNR
-ts1nELwZ5/sxgPrEyLtMQBTgkQIDAQABo0YwRDAOBgNVHQ8BAf8EBAMCB4AwFgYD
-VR0lAQH/BAwwCgYIKwYBBQUHAwMwGgYDVR0RBBMwEYEPaW5mb0BidWlsZDIub3Jn
-MA0GCSqGSIb3DQEBCwUAA4ICAQBvN3fGaVz2IvwGLX919kaNX8lyzOS3X1tJCVff
-9f9VgXCbkQ+Lw1Xj2qlOC2RtpR8JsMYRupEgIfyR9xzcDwxqs2VTQwMLZ0VarQqd
-ozvekNMj0LI4yhgj62bEnoJ/GpPbkGvxtv47oist4LVQtMJp9JwML/+KhO0cLtLz
-JJR/gmYFQ1Az6eM1Qvs346BgPsKsuJ5h7HZ97BWJZNA1zXSXorDa2D2gF7IyI87r
-eoleyILUhHuVab0FhrItk3HHRRcWVCaOILO2OCqeEr/JhYRvG1q9wwcAT1B3DGPW
-31RcF27wO15PMXplHSlndNigvzrO464FchtAR1mhg1j85OtlnqBRa/GYellqXUeq
-SypiRltlOpkc46e2Kl7XK9QLAF7ytbUtSf+chDEj4kWuE/hCm6bCArASzWXkI3SH
-gqQ9/6FBrNBTy/c+Ng3Y5EpOXc2s1u59Rk7t00SaQVGxklytspV0D1Jx+gZuAUsP
-u+/ns2zRfgrBb3j2oK2x5XZ5dp23cq7BBF3xbwCLjpq46wkx8LeTirHorcEKLTR1
-F50b2hnjf3ihbfQVjcI1tdHgI28PEgFCAHUvIKykDjiofyMAX9KoZUVEW4rLsTlI
-esx1SDL8gVGoIevP1H30kEpiksSox1S1wAD1Ylb+/EH7ITxNtHjHaqTiR/GHLQcj
-GZhKyw==
+dWlsZDIub3JnMB4XDTIyMDEyODE1MzkzM1oXDTM3MDEyNDE1MzkzM1owNTEXMBUG
+A1UECgwOQ29kZSBTeW50aGVzaXMxGjAYBgNVBAMMEW5hbWU6Ki5idWlsZDIub3Jn
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rvxJe7MXMIygVTVGfMR
+4B/MTRRRgbKdLcXaUbu+IfdyjGMu34GsUG51WzhcNOmNdLX2FHEiiESAPvVbWOEO
++ZQL3IIxbXc8IgiNEpNPPR58SFNwwd2E6C8pmyN7QhXuy+gpVdwbsN9RWXnXBY9A
+9JtSt+xBuxeDs/PkdoPH4O3aL2HB0TUzTPhHZ23BCSzMXIIqJfxEX2g9eKtPPkRM
+sLd/X0lRqkutjmXlnKXNCJ8uE8yUcuWtPSfsqSiEfvBpfcrJ4trQzYgiXfNp4Cy4
+34SsVV5wB2Uki9fTRy2HY478bHLT2wgqVqCLartkOKTawy8bx0C/uA1mxen4U6P9
+dcKI4j1/oPmiykl0r0HlQIAGOYZzxbk52/pkt7LlSLvqW4jtyIhM4p8CgSRvIyI0
+eJHEt0ztoB6LN1zAdThubomWaImTxTLJXzl/fea8JJ+6jsP4NBiIrrnT/HBF1MUE
+iqLfwjAwTraBPtqH8A6aI+yGPsWPAhTtlYeyTKv2qkTFA4QolegtkmjWt/oxFzwC
+wnLPrN19CFPUFPOGiA1+u7jGNUzp9KuNFQXfiay5ZzP8cqj1bfnQDDbLkXn2TOI/
+6bsbq4FiuqYSW//XdVqaDZlnVvfwozPlKzdKR9wMSs7PP8P7SFGU5oIh5a2zUbbN
+ZxC8Gef7MYD6xMi7TEAU4JECAwEAAaNGMEQwDgYDVR0PAQH/BAQDAgeAMBYGA1Ud
+JQEB/wQMMAoGCCsGAQUFBwMDMBoGA1UdEQQTMBGBD2luZm9AYnVpbGQyLm9yZzAN
+BgkqhkiG9w0BAQsFAAOCAgEAN3fr+vCXO1sTt6SYHq+aegBYdL1gBNcTbbpYu0Ry
+VY+3pJ8PDVXVDac0K0MCgxvc+XVRR5yMs1hxm8a6yHYmojLmi3v32TROiYpuk8Cz
+QgFxXzUuE+dKP2Cawc1iavuJjKGurA6boSEt/uurp4tHDPqX9UKUO5lkf6Y4kOWE
+u+W5LmunykPPXGUtJuzLwa4iLxHdE/YLxLDc17W6yQtgVWkrPb4OBYueWTiGLE9x
+f0xG6OW7QP3OIfq/5iqK+ryuT/NHSww4PSubHpignYwZ7sc6iByvGhjDROkFbB9R
+AEqcjb+yMFioZsNFZAZxuaUchaOk+fOKS8Mj9TWMBKDUWs7ntzkvNJj0BkEKtdTC
+R8F4D6O1UTX0QuS2ZVN3VCSxhG2G8lUfWE1gK+CHEDjco8oGQoR8HG0CiFvXx7/y
+ZIMBKjsG1VG9GXv+AZKj6FQFui1K0rDNCchMUoX8XHLmpuiFl5TAntM3zxxT6HOV
+2hVu3UTG5DM2ClsnEulm0qaBDK1zodCqp8yaU04UnObvNhu0BDfRqYqxUS1OZm/H
+I+2kwRKr+MFDTbUfB6k19TdQ6GVga+HCZkEH6EBCm8hJpuqDx6mW0eLweSno0lhl
+3UcPDLKv5xIMQ8+BNK0ZyA+7Kf6ra8oGLTVeY8KDfc04IuAuRmOE5Og64cK0njxI
+/nk=
-----END CERTIFICATE-----
diff --git a/tests/build/root.build b/tests/build/root.build
index 4da21de..1d29eec 100644
--- a/tests/build/root.build
+++ b/tests/build/root.build
@@ -1,6 +1,10 @@
# file : tests/build/root.build
# license : MIT; see accompanying LICENSE file
+# Enable all tests, including those which takes a long time to execute.
+#
+config [bool] config.bpkg.tests.all ?= false
+
# Use remote instead of local repository locations for tests.
#
# Note that this is an "instead of" rather than "in addition to" configuration
@@ -19,6 +23,7 @@ config [bool] config.bpkg.tests.git.ssh ?= false
# Using the project configuration variables all over the testscripts makes
# them look hairy. Thus, let's provide short aliases for these variables.
#
+all = $config.bpkg.tests.all
remote = $config.bpkg.tests.remote
git_ssh = $config.bpkg.tests.git.ssh
diff --git a/tests/cfg-create.testscript b/tests/cfg-create.testscript
index 9461dad..f22bd57 100644
--- a/tests/cfg-create.testscript
+++ b/tests/cfg-create.testscript
@@ -3,8 +3,11 @@
.include common.testscript
-config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+config_cxx = [cmdline] config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+cfg_create += 2>!
+
+cfg_info += -d cfg
pkg_status += -d cfg
: non-empty
@@ -25,8 +28,17 @@ EOE
{
$* 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
EOE
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+ EOO
+
$pkg_status libfoo >'libfoo unknown'
}
@@ -35,8 +47,17 @@ EOE
{
$* "config.install.root='$~/opt'" 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
EOE
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+ EOO
+
$pkg_status libfoo >'libfoo unknown';
cat cfg/build/config.build >>/~"%EOO%"
@@ -51,8 +72,17 @@ EOE
{
$* cxx $config_cxx 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
EOE
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+ EOO
+
$pkg_status libfoo >'libfoo unknown';
cat cfg/build/config.build >>/~"%EOO%"
@@ -69,8 +99,17 @@ EOE
$* --wipe 2>>/~%EOE%;
%created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
EOE
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+ EOO
+
$pkg_status libfoo >'libfoo unknown'
}
@@ -81,8 +120,172 @@ EOE
$* --existing 2>>/~%EOE%;
%initialized existing configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
+ EOE
+
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+ EOO
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+}
+
+: name
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ $* --name foo 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
+ name: foo
+ EOE
+
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ name: foo
+ EOO
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --name 123 2>>EOE != 0
+ error: invalid --name option value '123': illegal first character (must be alphabetic or underscore)
+ EOE
+}
+
+: type
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ $* --type host 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: host
+ EOE
+
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: host
+ %name: %
+ EOO
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --type '' 2>>EOE != 0
+ error: empty --type option value
+ EOE
+}
+
+: uuid
+:
+{
+ test.arguments += -d cfg
+
+ : valid
+ :
+ {
+ uuid='18f48b4b-b5d9-4712-b98c-1930df1c4228';
+
+ $* --uuid $uuid --name foo 2>>/~"%EOE%" &cfg/***;
+ %created new configuration in .+/cfg/%
+ uuid: $uuid
+ type: target
+ name: foo
+ EOE
+
+ $cfg_info >>/~"%EOO%";
+ %path: .+/cfg/%
+ uuid: $uuid
+ type: target
+ name: foo
+ EOO
+
+ $pkg_status libfoo >'libfoo unknown'
+ }
+
+ : invalid
+ :
+ : Also use the short option.
+ :
+ $* --uuid '123' 2>>EOE != 0
+ error: invalid value '123' for option '--uuid'
+ EOE
+}
+
+: link-config
+:
+{
+ test.arguments += -d cfg
+
+ : valid-type
+ :
+ {
+ $cfg_create -d host --type 'host' &host/***;
+ $cfg_create -d build2 --type 'build2' &build2/***;
+
+ $* --host-config host --build2-config build2 2>>/~%EOE% &cfg/***;
+ %created new configuration in .+/cfg/%
+ % uuid: .{36}%
+ type: target
EOE
+ $cfg_info --link >>/~"%EOO%";
+ %path: .+/cfg/%
+ %uuid: .{36}%
+ type: target
+ %name: %
+
+ %path: .+/host/%
+ %uuid: .{36}%
+ type: host
+ %name: %
+
+ %path: .+/build2/%
+ %uuid: .{36}%
+ type: build2
+ %name: %
+ EOO
+
$pkg_status libfoo >'libfoo unknown'
}
+
+ : invalid-type
+ :
+ {
+ $cfg_create -d cfg2 &cfg2/***;
+
+ $* --host-config cfg2 2>>/~%EOE% != 0;
+ %error: host configuration .+/cfg2/ is of 'target' type%
+ EOE
+
+ $* --build2-config cfg2 2>>/~%EOE% != 0
+ %error: build2 configuration .+/cfg2/ is of 'target' type%
+ EOE
+ }
}
diff --git a/tests/cfg-info.testscript b/tests/cfg-info.testscript
new file mode 100644
index 0000000..10e1e4c
--- /dev/null
+++ b/tests/cfg-info.testscript
@@ -0,0 +1,176 @@
+# file : tests/cfg-info.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include common.testscript
+
+cfg_create += 2>!
+
+uuid1 = '18f48b4b-b5d9-4712-b98c-1930df1c4228'
+uuid2 = '28f48b4b-b5d9-4712-b98c-1930df1c4228'
+uuid3 = '38f48b4b-b5d9-4712-b98c-1930df1c4228'
+uuid4 = '48f48b4b-b5d9-4712-b98c-1930df1c4228'
+
++$cfg_create -d cfg1 --name 't1' --uuid "$uuid1" &cfg1/***
++$cfg_create -d cfg2 --name 't2' --uuid "$uuid2" &cfg2/***
++$cfg_create -d cfg3 --name 'h3' --uuid "$uuid3" --type host &cfg3/***
++$cfg_create -d cfg4 --name 'b4' --uuid "$uuid4" --type build2 &cfg4/***
+
++$cfg_link -d cfg1 cfg3 2>!
++$cfg_link -d cfg2 cfg3 2>!
++$cfg_link -d cfg3 cfg4 2>!
+
+clone_cfgs = cp -r ../cfg1 ../cfg2 ../cfg3 ../cfg4 ./
+
+sp = ' '
+
+: self
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+ EOO
+}
+
+: links
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 --link >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+ EOO
+}
+
+: links-recursive
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 --link --recursive >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg4/
+ uuid: $uuid4
+ type: build2
+ name: b4
+ EOO
+}
+
+: backlinks
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg3 --backlink >>/"EOO";
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: t2
+ EOO
+
+ mv cfg2 cfg2.tmp;
+
+ # Make sure that dangling links are silently skipped.
+ #
+ $* -d cfg3 --backlink >>/"EOO";
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+ EOO
+
+ # While at it, test printing dangling links.
+ #
+ $* -d cfg3 --dangling >>/"EOO";
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name:$sp
+ EOO
+
+ $* -d cfg3 --dangling --backlink >>/"EOO"
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name:$sp
+ EOO
+}
+
+: all-links-recursive
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 --link --backlink --recursive >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: t1
+
+ path: $~/cfg3/
+ uuid: $uuid3
+ type: host
+ name: h3
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: t2
+
+ path: $~/cfg4/
+ uuid: $uuid4
+ type: build2
+ name: b4
+ EOO
+}
diff --git a/tests/cfg-link.testscript b/tests/cfg-link.testscript
new file mode 100644
index 0000000..6d98f03
--- /dev/null
+++ b/tests/cfg-link.testscript
@@ -0,0 +1,290 @@
+# file : tests/cfg-link.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include common.testscript
+
+cfg_create += 2>!
+cfg_info += --link
+
+uuid1 = '18f48b4b-b5d9-4712-b98c-1930df1c4228'
+uuid2 = '28f48b4b-b5d9-4712-b98c-1930df1c4228'
+uuid3 = '38f48b4b-b5d9-4712-b98c-1930df1c4228'
+
++$cfg_create -d cfg1 --name 'main' --uuid "$uuid1" &cfg1/***
++$cfg_create -d cfg2 --name 'shared' --uuid "$uuid2" &cfg2/***
+
+clone_cfgs = cp -r ../cfg1 ../cfg2 ./
+
+sp = ' '
+
+: self
+:
+{
+ $cfg_create -d cfg1 --uuid "$uuid1" &cfg1/***;
+
+ $* -d cfg1 cfg1 2>>/"EOE" != 0
+ error: linking configuration $~/cfg1/ with itself
+ info: uuid: $uuid1
+ EOE
+}
+
+: same-name
+:
+{
+ $cfg_create -d cfg1 --name 'main' &cfg1/***;
+ $cfg_create -d cfg2 --name 'shared' &cfg2/***;
+
+ $* -d cfg1 cfg2 --name 'main' 2>>/"EOE" != 0
+ error: linking configuration $~/cfg2/ using current configuration name 'main'
+ info: consider specifying alternative name with --name
+ EOE
+}
+
+: basic
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>>/"EOE";
+ linked with configuration $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ id: 1
+ EOE
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ EOO
+
+ $cfg_info -d cfg2 --backlink >>/"EOO";
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+ EOO
+
+ # While at it, test that an implicit link is unnamed.
+ #
+ mv cfg1 cfg1.tmp;
+
+ $cfg_info -d cfg2 --dangling >>/"EOO"
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name:$sp
+ EOO
+}
+
+: implicit-relink
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ rm -r cfg1/;
+ $cfg_create -d cfg1 --name 'foo' --uuid "$uuid1";
+
+ $* -d cfg1 cfg2 2>>/"EOE";
+ warning: current configuration $~/cfg1/ is already implicitly linked with $~/cfg2/
+ linked with configuration $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ id: 1
+ EOE
+
+ $cfg_info -d cfg1 >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: foo
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ EOO
+}
+
+: turn-implicit-explicit
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ $* -d cfg2 cfg1 2>>/"EOE";
+ linked with configuration $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+ id: 1
+ EOE
+
+ $cfg_info -d cfg2 >>/"EOO";
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ EOO
+
+ # While at it, test that relink attempt is reported.
+ #
+ $* -d cfg2 cfg1 2>>/"EOE" != 0
+ error: configuration with uuid $uuid1 is already linked as ../cfg1/
+ EOE
+}
+
+: uuid-clash
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ $* -d cfg1 cfg2 2>>/"EOE" != 0
+ error: configuration with uuid $uuid2 is already linked as ../cfg2/
+ EOE
+}
+
+: path-clash
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ mv cfg2 cfg2.tmp;
+ $cfg_create -d cfg2 --name 'shared' --uuid "$uuid3" &cfg2/***;
+
+ $* -d cfg1 cfg2 2>>/"EOE" != 0
+ error: configuration with path $~/cfg2/ is already linked
+ EOE
+}
+
+: name-clash
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ $cfg_create -d cfg3 --name 'shared' --uuid "$uuid3" &cfg3/***;
+
+ $* -d cfg1 cfg3 --name 'shared' 2>>/"EOE" != 0;
+ error: configuration with name shared is already linked as ../cfg2/
+ info: consider specifying alternative name with --name
+ EOE
+
+ $* -d cfg1 cfg3 2>>/"EOE"
+ warning: configuration with name shared is already linked as ../cfg2/, linking as unnamed
+ linked with configuration $~/cfg3/
+ uuid: $uuid3
+ type: target
+ id: 2
+ EOE
+}
+
+: implicit-link-path-clash
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+ $* -d cfg2 cfg1 2>!;
+
+ mv cfg1 cfg1.tmp;
+ $cfg_create -d cfg1 --name 'main' --uuid "$uuid3" &cfg1/***;
+
+ $* -d cfg1 cfg2 2>>/"EOE" != 0
+ error: current configuration $~/cfg1/ is already linked with $~/cfg2/
+ EOE
+}
+
+: uuid-mismatch
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ mv cfg2 cfg2.tmp;
+
+ $cfg_create -d cfg2 --name 'shared' --uuid "$uuid3";
+
+ $cfg_info -d cfg1 >- 2>>/"EOE" != 0;
+ error: configuration cfg2/ uuid mismatch
+ info: uuid $uuid3
+ info: linked with cfg1/ as $uuid2
+ EOE
+
+ rm -r cfg2;
+ mv cfg2.tmp cfg2;
+
+ $cfg_info -d cfg1 >>/"EOO"
+ path: $~/cfg1/
+ uuid: $uuid1
+ type: target
+ name: main
+
+ path: $~/cfg2/
+ uuid: $uuid2
+ type: target
+ name: shared
+ EOO
+}
+
+: type-mismatch
+:
+{
+ $clone_cfgs;
+
+ $* -d cfg1 cfg2 2>!;
+
+ rm -r cfg2;
+
+ $cfg_create -d cfg2 --name 'shared' --uuid "$uuid2" --type 'shared';
+
+ $cfg_info -d cfg1 >- 2>>/"EOE" != 0
+ error: configuration cfg2/ type mismatch
+ info: type shared
+ info: linked with cfg1/ as target
+ EOE
+}
diff --git a/tests/cfg-unlink.testscript b/tests/cfg-unlink.testscript
new file mode 100644
index 0000000..2ca3783
--- /dev/null
+++ b/tests/cfg-unlink.testscript
@@ -0,0 +1,275 @@
+# file : tests/cfg-link.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include common.testscript remote.testscript
+
+# Source repository (see pkg-build for details):
+#
+# cfg-unlink
+# `-- t7a
+
+# Prepare repositories used by tests if running in the local mode.
+#
++if! $remote
+ rep_create += 2>!
+
+ cp -r $src/t7a $out/t7a && $rep_create $out/t7a &$out/t7a/packages.manifest
+end
+
+cfg_create += 2>!
+cfg_info += --link
+pkg_build += --yes 2>!
+pkg_drop += --yes 2>!
+rep_add += 2>!
+rep_fetch += --trust-yes 2>!
+
+cfg1_uuid = '18f48b4b-b5d9-4712-b98c-1930df1c4228'
+cfg2_uuid ='28f48b4b-b5d9-4712-b98c-1930df1c4228'
+
++$cfg_create -d cfg1 --name 'main' --uuid "$cfg1_uuid" &cfg1/***
++$cfg_create -d cfg2 --name 'shared' --uuid "$cfg2_uuid" --type host &cfg2/***
+
++$cfg_link -d cfg1 cfg2 2>!
+
+clone_root_cfgs = cp -r $~/cfg1 $~/cfg2 ./
+
+: unlink
+:
+{
+ : name-dir
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 cfg2 --name 'host' 2>/'error: both --name and directory argument specified' != 0
+ }
+
+ : dir
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 cfg1 2>/"error: no configuration with path $~/cfg1/ is linked with cfg1/" != 0;
+
+ $* -d cfg1 cfg2 2>/"unlinked configuration $~/cfg2/";
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/"EOO"
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+ EOO
+ }
+
+ : name
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 --name 'target' 2>/"error: no configuration with name 'target' is linked with cfg1/" != 0;
+
+ $* -d cfg1 --name 'shared' 2>/"unlinked configuration $~/cfg2/";
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/"EOO"
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+ EOO
+ }
+
+ : id
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 --id 2 2>/"error: no configuration with id 2 is linked with cfg1/" != 0;
+
+ $* -d cfg1 --id 1 2>/"unlinked configuration $~/cfg2/";
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/"EOO"
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+ EOO
+ }
+
+ : uuid
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 --uuid $cfg1_uuid 2>/"error: no configuration with uuid $cfg1_uuid is linked with cfg1/" != 0;
+
+ $* -d cfg1 --uuid $cfg2_uuid 2>/"unlinked configuration $~/cfg2/";
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/"EOO"
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+ EOO
+ }
+
+ : mutual
+ :
+ {
+ $clone_root_cfgs;
+
+ $cfg_link -d cfg2 cfg1 2>!;
+
+ $* -d cfg1 cfg2 2>>/"EOE";
+ info: configurations cfg2/ and cfg1/ are mutually linked, turning the link to cfg2/ into implicit backlink
+ unlinked configuration $~/cfg2/
+ EOE
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/"EOO"
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+ }
+
+ : dependency
+ :
+ {
+ $clone_root_cfgs;
+
+ $rep_add -d cfg1 $rep/t7a && $rep_fetch -d cfg1;
+
+ $pkg_build -d cfg1 libbar &cfg2/.bpkg/build2/***;
+
+ $* -d cfg1 cfg2 2>>/EOE != 0;
+ error: configuration cfg1/ still depends on configuration cfg2/
+ info: package foo [cfg2/] has dependents:
+ info: package libbar on foo ^1.0.0
+ EOE
+
+ $pkg_drop -d cfg1 --keep-unused libbar;
+
+ $* -d cfg1 cfg2 2>>/"EOE";
+ unlinked configuration $~/cfg2/
+ EOE
+
+ $cfg_info -d cfg1 >>/"EOO";
+ path: $~/cfg1/
+ uuid: $cfg1_uuid
+ type: target
+ name: main
+ EOO
+
+ $cfg_info -d cfg2 >>/~"%EOO%";
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+
+ path: $~/cfg2/.bpkg/build2/
+ %uuid: .{36}%
+ type: build2
+ name: build2
+ EOO
+
+ $pkg_drop -d cfg1 libbaz;
+ $pkg_drop -d cfg2 foo
+ }
+
+ : dependency-private
+ :
+ {
+ $clone_root_cfgs;
+
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $pkg_build -d cfg2 foo;
+
+ $* -d cfg2 --name build2 2>>/EOE != 0;
+ error: configuration cfg2/ still depends on private configuration cfg2/.bpkg/build2/
+ info: package libbuild2-bar [cfg2/.bpkg/build2/] has dependents:
+ info: package foo on libbuild2-bar ^1.0.0
+ EOE
+
+ $pkg_drop -d cfg2 --keep-unused foo;
+
+ test -d cfg2/.bpkg/build2/;
+
+ $* -d cfg2 --name build2 2>>/"EOE";
+ unlinked and removed configuration $~/cfg2/.bpkg/build2/
+ EOE
+
+ $cfg_info -d cfg2 >>/"EOO";
+ path: $~/cfg2/
+ uuid: $cfg2_uuid
+ type: host
+ name: shared
+ EOO
+
+ test -d cfg2/.bpkg/build2/ == 1;
+
+ $pkg_drop -d cfg2 libbaz
+ }
+}
+: remove-dangling
+:
+{
+ : success
+ :
+ {
+ $clone_root_cfgs;
+
+ mv cfg1 cfg3;
+
+ $* -d cfg2 --dangling 2>'removed 1 dangling implicit backlink(s)';
+ $* -d cfg2 --dangling 2>'removed 0 dangling implicit backlink(s)'
+ }
+
+ : error
+ :
+ {
+ $clone_root_cfgs;
+
+ $* -d cfg1 --dangling --name 'host' 2>'error: both --dangling and --name specified' != 0
+ }
+}
diff --git a/tests/cfg-unlink/t7a b/tests/cfg-unlink/t7a
new file mode 120000
index 0000000..d02b5d4
--- /dev/null
+++ b/tests/cfg-unlink/t7a
@@ -0,0 +1 @@
+../common/linked/t7a \ No newline at end of file
diff --git a/tests/common.testscript b/tests/common.testscript
index 5db8c6a..30fcf7e 100644
--- a/tests/common.testscript
+++ b/tests/common.testscript
@@ -32,23 +32,28 @@ test.options += --default-options $options_guard \
# (for example, to make sure that configuration post-test state is valid and is
# as expected).
#
-cfg_create = $* cfg-create
-pkg_build = $* pkg-build
-pkg_checkout = $* pkg-checkout
-pkg_configure = $* pkg-configure
-pkg_disfigure = $* pkg-disfigure
-pkg_drop = $* pkg-drop
-pkg_fetch = $* pkg-fetch
-pkg_purge = $* pkg-purge
-pkg_status = $* pkg-status
-pkg_unpack = $* pkg-unpack
-pkg_update = $* pkg-update
-rep_add = $* rep-add
-rep_create = $* rep-create
-rep_fetch = $* rep-fetch
-rep_info = $* rep-info
-rep_list = $* rep-list
-rep_remove = $* rep-remove
+# Disable the use of the system package manager for the pkg-build command.
+#
+cfg_create = [cmdline] $* cfg-create
+cfg_info = [cmdline] $* cfg-info
+cfg_link = [cmdline] $* cfg-link
+cfg_unlink = [cmdline] $* cfg-unlink
+pkg_build = [cmdline] $* pkg-build --sys-no-query
+pkg_checkout = [cmdline] $* pkg-checkout
+pkg_configure = [cmdline] $* pkg-configure
+pkg_disfigure = [cmdline] $* pkg-disfigure
+pkg_drop = [cmdline] $* pkg-drop
+pkg_fetch = [cmdline] $* pkg-fetch
+pkg_purge = [cmdline] $* pkg-purge
+pkg_status = [cmdline] $* pkg-status
+pkg_unpack = [cmdline] $* pkg-unpack
+pkg_update = [cmdline] $* pkg-update
+rep_add = [cmdline] $* rep-add
+rep_create = [cmdline] $* rep-create
+rep_fetch = [cmdline] $* rep-fetch
+rep_info = [cmdline] $* rep-info
+rep_list = [cmdline] $* rep-list
+rep_remove = [cmdline] $* rep-remove
# All testscripts are named after bpkg commands, for example
# pkg-verify.testscript. So the testscript scope id is a name of the command
diff --git a/tests/common/compatibility/t15/libbar-1.0.0.tar.gz b/tests/common/compatibility/t15/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..2c741b2
--- /dev/null
+++ b/tests/common/compatibility/t15/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/compatibility/t15/libbaz-1.0.0.tar.gz b/tests/common/compatibility/t15/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..07e6d04
--- /dev/null
+++ b/tests/common/compatibility/t15/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/compatibility/t15/libbiz-1.0.0.tar.gz b/tests/common/compatibility/t15/libbiz-1.0.0.tar.gz
new file mode 100644
index 0000000..52ee52a
--- /dev/null
+++ b/tests/common/compatibility/t15/libbiz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/compatibility/t15/libfoo-1.0.0.tar.gz b/tests/common/compatibility/t15/libfoo-1.0.0.tar.gz
new file mode 100644
index 0000000..be052c2
--- /dev/null
+++ b/tests/common/compatibility/t15/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/compatibility/t15/repositories.manifest b/tests/common/compatibility/t15/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/compatibility/t15/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t11a/bac-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bac-1.0.0.tar.gz
new file mode 100644
index 0000000..7a7670d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bac-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bar-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/bar-0.1.0.tar.gz
new file mode 100644
index 0000000..34774a8
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bar-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..15d819e
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bas-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bas-1.0.0.tar.gz
new file mode 100644
index 0000000..c6365e3
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bas-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bat-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bat-1.0.0.tar.gz
new file mode 100644
index 0000000..bb92104
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bat-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bax-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/bax-0.1.0.tar.gz
new file mode 100644
index 0000000..8ed7936
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bax-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bax-1.0.0.tar.gz
new file mode 100644
index 0000000..6c00903
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/baz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/baz-0.1.0.tar.gz
new file mode 100644
index 0000000..a1f37b0
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/baz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..1fd57fe
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bex-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bex-1.0.0.tar.gz
new file mode 100644
index 0000000..4afa0f8
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bix-1.0.0.tar.gz
new file mode 100644
index 0000000..2109914
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/biz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/biz-0.1.0.tar.gz
new file mode 100644
index 0000000..b42dff0
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/biz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..e81a027
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/boo-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/boo-1.0.0.tar.gz
new file mode 100644
index 0000000..778b253
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/boo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/box-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/box-0.1.0.tar.gz
new file mode 100644
index 0000000..8e91f91
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/box-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/box-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/box-0.2.0.tar.gz
new file mode 100644
index 0000000..fc22464
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/box-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/box-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/box-1.0.0.tar.gz
new file mode 100644
index 0000000..d205dc1
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/box-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/buc-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/buc-1.0.0.tar.gz
new file mode 100644
index 0000000..88183f7
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/buc-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bus-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/bus-0.1.0.tar.gz
new file mode 100644
index 0000000..e3672b2
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bus-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bus-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bus-1.0.0.tar.gz
new file mode 100644
index 0000000..1530f14
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bus-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/bux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/bux-1.0.0.tar.gz
new file mode 100644
index 0000000..210941a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/bux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/buz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/buz-1.0.0.tar.gz
new file mode 100644
index 0000000..7419d8a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/buz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/dex-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/dex-1.0.0.tar.gz
new file mode 100644
index 0000000..58bb16d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/dex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/dix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/dix-1.0.0.tar.gz
new file mode 100644
index 0000000..2236190
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/dix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/diz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/diz-1.0.0.tar.gz
new file mode 100644
index 0000000..a7fc8fa
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/diz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/dox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/dox-1.0.0.tar.gz
new file mode 100644
index 0000000..00c730a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/dox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fex-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/fex-0.1.0.tar.gz
new file mode 100644
index 0000000..160601b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fex-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fex-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/fex-1.0.0.tar.gz
new file mode 100644
index 0000000..00e925a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fix-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/fix-0.1.0.tar.gz
new file mode 100644
index 0000000..1d4ab42
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fix-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/fix-1.0.0.tar.gz
new file mode 100644
index 0000000..3613136
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/foo-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/foo-0.1.0.tar.gz
new file mode 100644
index 0000000..ebf3f01
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/foo-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/foo-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/foo-0.2.0.tar.gz
new file mode 100644
index 0000000..59bac4b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/foo-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/foo-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..42d9cc5
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fox-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/fox-0.1.0.tar.gz
new file mode 100644
index 0000000..19e7307
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fox-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fox-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/fox-0.2.0.tar.gz
new file mode 100644
index 0000000..6f11b23
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fox-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/fox-1.0.0.tar.gz
new file mode 100644
index 0000000..159cfbd
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fux-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/fux-0.1.0.tar.gz
new file mode 100644
index 0000000..563761e
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fux-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fux-0.1.1.tar.gz b/tests/common/dependency-alternatives/t11a/fux-0.1.1.tar.gz
new file mode 100644
index 0000000..22db443
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fux-0.1.1.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fux-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/fux-0.2.0.tar.gz
new file mode 100644
index 0000000..4966b0d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fux-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/fux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/fux-1.0.0.tar.gz
new file mode 100644
index 0000000..c723ef9
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/fux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbar-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbar-0.1.0.tar.gz
new file mode 100644
index 0000000..57b9ccc
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbar-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..aacf8d7
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbaz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbaz-0.1.0.tar.gz
new file mode 100644
index 0000000..b8bfaec
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbaz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbaz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..a41505d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbiz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbiz-0.1.0.tar.gz
new file mode 100644
index 0000000..429dc0d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbiz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbiz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbiz-1.0.0.tar.gz
new file mode 100644
index 0000000..250f110
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbiz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbox-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbox-0.1.0.tar.gz
new file mode 100644
index 0000000..3388a94
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbox-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libbox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..ce7f51f
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libfoo-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/libfoo-0.1.0.tar.gz
new file mode 100644
index 0000000..a94b8fe
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libfoo-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/libfoo-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/libfoo-1.0.0.tar.gz
new file mode 100644
index 0000000..e46178a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/repositories.manifest b/tests/common/dependency-alternatives/t11a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t11a/tax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tax-1.0.0.tar.gz
new file mode 100644
index 0000000..616cb05
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tex-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/tex-0.1.0.tar.gz
new file mode 100644
index 0000000..eb7d09f
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tex-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tex-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/tex-0.2.0.tar.gz
new file mode 100644
index 0000000..0b21183
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tex-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tex-0.3.0.tar.gz b/tests/common/dependency-alternatives/t11a/tex-0.3.0.tar.gz
new file mode 100644
index 0000000..836a032
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tex-0.3.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tex-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tex-1.0.0.tar.gz
new file mode 100644
index 0000000..0c9e29d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tez-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/tez-0.1.0.tar.gz
new file mode 100644
index 0000000..208acfe
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tez-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tez-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tez-1.0.0.tar.gz
new file mode 100644
index 0000000..edf378b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tez-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tix-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/tix-0.1.0.tar.gz
new file mode 100644
index 0000000..2badf78
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tix-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tix-1.0.0.tar.gz
new file mode 100644
index 0000000..a1f2930
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tiz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tiz-1.0.0.tar.gz
new file mode 100644
index 0000000..8bfadcd
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tiz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/toz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/toz-0.1.0.tar.gz
new file mode 100644
index 0000000..b99803c
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/toz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/toz-0.2.0.tar.gz b/tests/common/dependency-alternatives/t11a/toz-0.2.0.tar.gz
new file mode 100644
index 0000000..b2bd931
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/toz-0.2.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/toz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/toz-1.0.0.tar.gz
new file mode 100644
index 0000000..ccfc094
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/toz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tux-1.0.0.tar.gz
new file mode 100644
index 0000000..62c4e73
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tuz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tuz-1.0.0.tar.gz
new file mode 100644
index 0000000..d4a8cdf
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tuz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tvz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t11a/tvz-0.1.0.tar.gz
new file mode 100644
index 0000000..55ef173
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tvz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t11a/tvz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t11a/tvz-1.0.0.tar.gz
new file mode 100644
index 0000000..0743be6
--- /dev/null
+++ b/tests/common/dependency-alternatives/t11a/tvz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13a/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..db3c22d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13a/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..f86b0aa
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13a/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..cfa8fa2
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/box-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13a/box-1.0.0.tar.gz
new file mode 100644
index 0000000..be45865
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/box-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13a/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..c177a1b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13a/repositories.manifest b/tests/common/dependency-alternatives/t13a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13b/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13b/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..5b49774
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13b/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13b/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13b/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..cb136b4
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13b/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13b/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13b/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..27fe077
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13b/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13b/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13b/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..ee1dd3a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13b/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13b/repositories.manifest b/tests/common/dependency-alternatives/t13b/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13b/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13c/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13c/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..83ad484
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13c/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13c/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13c/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..b64552e
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13c/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13c/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13c/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..5b79dc9
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13c/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13c/repositories.manifest b/tests/common/dependency-alternatives/t13c/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13c/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13d/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13d/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..5683176
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13d/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13d/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13d/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..8101c5d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13d/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13d/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13d/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..f2965f5
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13d/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13d/libb-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13d/libb-1.0.0.tar.gz
new file mode 100644
index 0000000..0f13429
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13d/libb-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13d/repositories.manifest b/tests/common/dependency-alternatives/t13d/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13d/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13e/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13e/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..7211afb
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13e/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13e/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13e/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..e5f2880
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13e/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13e/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13e/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..afd7db3
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13e/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13e/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13e/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..f3cb165
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13e/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13e/repositories.manifest b/tests/common/dependency-alternatives/t13e/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13e/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13f/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13f/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..7a255a1
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13f/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13f/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13f/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..f697c81
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13f/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13f/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13f/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..877305f
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13f/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13f/repositories.manifest b/tests/common/dependency-alternatives/t13f/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13f/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13g/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..5c8a596
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..011ffea
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..77cb421
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/box-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/box-1.0.0.tar.gz
new file mode 100644
index 0000000..5217e88
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/box-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..2ee35ae
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/libb-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13g/libb-1.0.0.tar.gz
new file mode 100644
index 0000000..0f13429
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/libb-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13g/repositories.manifest b/tests/common/dependency-alternatives/t13g/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13g/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13h/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13h/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..7e85fca
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13h/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13h/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13h/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..6fbdf82
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13h/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13h/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13h/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..ffd380c
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13h/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13h/repositories.manifest b/tests/common/dependency-alternatives/t13h/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13h/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13i/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13i/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..6c1076d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13i/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13i/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13i/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..b086ef4
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13i/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13i/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13i/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..4a7bd22
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13i/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13i/repositories.manifest b/tests/common/dependency-alternatives/t13i/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13i/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13j/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13j/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..8660e0f
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13j/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13j/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13j/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..7483377
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13j/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13j/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13j/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..948c1c4
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13j/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13j/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13j/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..0c94586
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13j/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13j/repositories.manifest b/tests/common/dependency-alternatives/t13j/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13j/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13k/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13k/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..51de0c5
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13k/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13k/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13k/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..a8c62ba
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13k/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13k/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13k/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..c4a4b32
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13k/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13k/repositories.manifest b/tests/common/dependency-alternatives/t13k/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13k/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13l/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13l/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..d114c5c
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13l/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13l/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13l/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..e884965
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13l/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13l/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13l/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..3590340
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13l/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13l/libb-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13l/libb-1.0.0.tar.gz
new file mode 100644
index 0000000..409f438
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13l/libb-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13l/repositories.manifest b/tests/common/dependency-alternatives/t13l/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13l/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13m/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..7a2bc53
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..c5028f7
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/bix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/bix-1.0.0.tar.gz
new file mode 100644
index 0000000..9d45acc
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/bix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..d12852b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/box-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/box-1.0.0.tar.gz
new file mode 100644
index 0000000..316458d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/box-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13m/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..3857354
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13m/repositories.manifest b/tests/common/dependency-alternatives/t13m/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13m/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13n/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13n/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..d3d3a24
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13n/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13n/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13n/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..fd96b61
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13n/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13n/libb-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13n/libb-1.0.0.tar.gz
new file mode 100644
index 0000000..409f438
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13n/libb-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13n/repositories.manifest b/tests/common/dependency-alternatives/t13n/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13n/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t13o/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13o/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..d6ed94c
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13o/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13o/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..7c1f3b7
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13o/bix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13o/bix-1.0.0.tar.gz
new file mode 100644
index 0000000..cdbba8d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/bix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13o/biz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13o/biz-1.0.0.tar.gz
new file mode 100644
index 0000000..a7908c1
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/biz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13o/liba-1.0.0.tar.gz b/tests/common/dependency-alternatives/t13o/liba-1.0.0.tar.gz
new file mode 100644
index 0000000..d9dcd50
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/liba-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t13o/repositories.manifest b/tests/common/dependency-alternatives/t13o/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t13o/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t8a/bar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..8fd34e9
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/bax-0.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/bax-0.1.0.tar.gz
new file mode 100644
index 0000000..85a24ea
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bax-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/bax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/bax-1.0.0.tar.gz
new file mode 100644
index 0000000..204c335
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/baz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..733d887
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/bix-0.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/bix-0.1.0.tar.gz
new file mode 100644
index 0000000..d0cc912
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bix-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/bix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/bix-1.0.0.tar.gz
new file mode 100644
index 0000000..f1bab8d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/box-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/box-1.0.0.tar.gz
new file mode 100644
index 0000000..f266c46
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/box-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/bux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/bux-1.0.0.tar.gz
new file mode 100644
index 0000000..9395a59
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/bux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/dax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/dax-1.0.0.tar.gz
new file mode 100644
index 0000000..0839f7b
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/dax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/dix-0.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/dix-0.1.0.tar.gz
new file mode 100644
index 0000000..706f068
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/dix-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/dix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/dix-1.0.0.tar.gz
new file mode 100644
index 0000000..764e530
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/dix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/dox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/dox-1.0.0.tar.gz
new file mode 100644
index 0000000..475f7d6
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/dox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/dux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/dux-1.0.0.tar.gz
new file mode 100644
index 0000000..23f5505
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/dux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/fax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/fax-1.0.0.tar.gz
new file mode 100644
index 0000000..2bf2360
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/fax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/fix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/fix-1.0.0.tar.gz
new file mode 100644
index 0000000..9383845
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/fix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/foo-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..bca3658
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/fox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/fox-1.0.0.tar.gz
new file mode 100644
index 0000000..baf1068
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/fox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/foz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/foz-1.0.0.tar.gz
new file mode 100644
index 0000000..90506c6
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/foz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/fux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/fux-1.0.0.tar.gz
new file mode 100644
index 0000000..7764719
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/fux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/fuz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/fuz-1.0.0.tar.gz
new file mode 100644
index 0000000..03f8f1a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/fuz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbar-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..badb970
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbaz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..2a24050
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbaz-1.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbaz-1.1.0.tar.gz
new file mode 100644
index 0000000..ddc5435
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbaz-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbiz-0.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbiz-0.1.0.tar.gz
new file mode 100644
index 0000000..575e346
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbiz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbiz-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbiz-1.0.0.tar.gz
new file mode 100644
index 0000000..06316f9
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbiz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbox-0.1.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbox-0.1.0.tar.gz
new file mode 100644
index 0000000..0d4c32d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbox-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbox-0.1.1.tar.gz b/tests/common/dependency-alternatives/t8a/libbox-0.1.1.tar.gz
new file mode 100644
index 0000000..a14d55a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbox-0.1.1.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libbox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..ba67cfe
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libfoo-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libfoo-1.0.0.tar.gz
new file mode 100644
index 0000000..f2dcb15
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libfoo-2.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libfoo-2.0.0.tar.gz
new file mode 100644
index 0000000..2b6f0f5
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libfoo-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/libfox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/libfox-1.0.0.tar.gz
new file mode 100644
index 0000000..68eee9d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/libfox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/repositories.manifest b/tests/common/dependency-alternatives/t8a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/dependency-alternatives/t8a/tax-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tax-1.0.0.tar.gz
new file mode 100644
index 0000000..7c2b99d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tex-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tex-1.0.0.tar.gz
new file mode 100644
index 0000000..663b09a
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tix-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tix-1.0.0.tar.gz
new file mode 100644
index 0000000..34ea3da
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tox-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tox-1.0.0.tar.gz
new file mode 100644
index 0000000..c767026
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tpx-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tpx-1.0.0.tar.gz
new file mode 100644
index 0000000..aa8db1d
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tpx-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tux-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tux-1.0.0.tar.gz
new file mode 100644
index 0000000..e171e87
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/tvx-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/tvx-1.0.0.tar.gz
new file mode 100644
index 0000000..0d719a5
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/tvx-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/dependency-alternatives/t8a/twx-1.0.0.tar.gz b/tests/common/dependency-alternatives/t8a/twx-1.0.0.tar.gz
new file mode 100644
index 0000000..03e8fbb
--- /dev/null
+++ b/tests/common/dependency-alternatives/t8a/twx-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/foo-1.tar.gz b/tests/common/foo-1.tar.gz
index 0195f04..0ae2384 100644
--- a/tests/common/foo-1.tar.gz
+++ b/tests/common/foo-1.tar.gz
Binary files differ
diff --git a/tests/common/git/README b/tests/common/git/README
index 737cd83..5b4781b 100644
--- a/tests/common/git/README
+++ b/tests/common/git/README
@@ -8,7 +8,7 @@ before modification, and
$ ./pack
-afterwrds.
+afterwards.
Also note that config files under .git/ subdirectory refer to the submodule
repositories using absolute paths. So prior to pulling in subproject directory
diff --git a/tests/common/git/init b/tests/common/git/init
index 81479a8..e34246e 100755
--- a/tests/common/git/init
+++ b/tests/common/git/init
@@ -80,7 +80,7 @@ rm -f style-basic.git/repositories.manifest
#
git -C style-basic.git init
git -C style-basic.git add '*'
-git -C style-basic.git commit -am 'Create'
+git -C style-basic.git commit -am 'Create' --no-verify
# Create stable branch for style-basic.
#
@@ -93,21 +93,21 @@ cat <<EOF >style-basic.git/repositories.manifest
email: user@example.com
EOF
git -C style-basic.git add README repositories.manifest
-git -C style-basic.git commit -am 'README'
+git -C style-basic.git commit -am 'README' --no-verify
# Create master branch for style.git, adding style-basic.git as a submodule.
#
git -C style.git init
git -C style.git add '*'
git -C style.git submodule add ../style-basic.git basic # The stable branch.
-git -C style.git commit -am 'Create'
+git -C style.git commit -am 'Create' --no-verify
# Make style.git to refer an unadvertised reference, commiting into the stable
# branch of style-basic.git.
#
touch style-basic.git/INSTALL
git -C style-basic.git add INSTALL
-git -C style-basic.git commit -am 'INSTALL'
+git -C style-basic.git commit -am 'INSTALL' --no-verify
git -C style-basic.git checkout master
# Create master branch for libbar.git.
@@ -127,7 +127,7 @@ depends: style-basic >= $
EOF
git -C libbar.git add '*'
-git -C libbar.git commit -am 'Create'
+git -C libbar.git commit -am 'Create' --no-verify
git -C libbar.git tag -a 'v1.0.0' -m 'Tag version 1.0.0'
git -C libbar.git submodule add -b stable ../style-basic.git extras
@@ -143,7 +143,7 @@ email: pkg@example.org
depends: style-basic >= $
EOF
-git -C libbar.git commit -am 'Add extras'
+git -C libbar.git commit -am 'Add extras' --no-verify
git -C libbar.git tag -a 'v1.0.0+1' -m 'Tag version 1.0.0+1'
# Create master branch for libfoo.git, adding style.git and libbar.git as
@@ -165,7 +165,7 @@ git -C libfoo.git add '*'
git -C libfoo.git submodule add ../style.git doc/style
git -C libfoo.git submodule add ../libbar.git libbar
git -C libfoo.git submodule update --init --recursive # Updates doc/style/basic.
-git -C libfoo.git commit -am 'Create'
+git -C libfoo.git commit -am 'Create' --no-verify
git -C libfoo.git tag -a 'v0.0.1' -m 'Tag version 0.0.1'
# Increase libfoo version and add tags.
@@ -180,7 +180,7 @@ url: http://example.org
email: pkg@example.org
EOF
-git -C libfoo.git commit -am 'Increase version to 1.0.0'
+git -C libfoo.git commit -am 'Increase version to 1.0.0' --no-verify
git -C libfoo.git tag 'ltag'
git -C libfoo.git tag -a 'atag' -m 'Create annotated tag'
@@ -190,7 +190,7 @@ git -C libfoo.git tag -a 'v1.0.0' -m 'Tag version 1.0.0'
#
touch libfoo.git/README
git -C libfoo.git add README
-git -C libfoo.git commit -am 'README'
+git -C libfoo.git commit -am 'README' --no-verify
# Create master branch for libfox.git, adding libbar.git as a submodule.
#
@@ -198,7 +198,7 @@ git -C libfox.git init
git -C libfox.git add '*'
git -C libfox.git submodule add ../libbar.git libbar
git -C libfox.git submodule update --init --recursive # Recursive for safety.
-git -C libfox.git commit -am 'Create'
+git -C libfox.git commit -am 'Create' --no-verify
# Create master branch for links.git, adding style.git as a submodule.
#
@@ -217,7 +217,7 @@ EOF
git -C links.git add '*'
git -C links.git submodule add ../style.git doc/style
git -C links.git submodule update --init --recursive # Updates doc/style/basic.
-git -C links.git commit -am 'Create'
+git -C links.git commit -am 'Create' --no-verify
git -C links.git tag -a 'v0.0.1' -m 'Tag version 0.0.1'
# Increase links version and add symlinks.
@@ -240,7 +240,7 @@ ln -s doc/style/basic links.git/bs # Submodule directory symlink.
ln -s bs/page.css links.git/pg # Symlink via submodule directory symlink.
git -C links.git add '*'
-git -C links.git commit -am 'Add symlinks'
+git -C links.git commit -am 'Add symlinks' --no-verify
git -C links.git tag -a 'v1.0.0-alpha' -m 'Tag version 1.0.0-alpha'
# Increase links version and add dangling symlink.
@@ -258,7 +258,7 @@ EOF
ln -s lc links.git/bl # Dangling symlink.
git -C links.git add '*'
-git -C links.git commit -am 'Add dangling symlinks'
+git -C links.git commit -am 'Add dangling symlinks' --no-verify
git -C links.git tag -a 'v1.0.1' -m 'Tag version 1.0.1'
# Increase links version and add cyclic symlink.
@@ -276,7 +276,7 @@ EOF
ln -s bl links.git/lc # Cyclic symlink.
git -C links.git add '*'
-git -C links.git commit -am 'Add cyclic symlinks'
+git -C links.git commit -am 'Add cyclic symlinks' --no-verify
git -C links.git tag -a 'v1.0.2' -m 'Tag version 1.0.2'
@@ -303,7 +303,7 @@ rm -f -r libbaz.git/.git
git -C libbaz.git init
git -C libbaz.git add '*'
-git -C libbaz.git commit -am 'Create'
+git -C libbaz.git commit -am 'Create' --no-verify
# Sync submodule references with their new locations.
#
@@ -315,28 +315,28 @@ done
#
touch style.git/README
git -C style.git add README
-git -C style.git commit -am 'README'
+git -C style.git commit -am 'README' --no-verify
# Advance libfoo.git master branch.
#
git -C libfoo.git submodule update --init --remote # Pull style only.
-git -C libfoo.git commit -am 'Update style'
+git -C libfoo.git commit -am 'Update style' --no-verify
git -C libfoo.git rm -r tests
-git -C libfoo.git commit -am 'Remove tests'
+git -C libfoo.git commit -am 'Remove tests' --no-verify
git -C libfoo.git submodule deinit libbar
git -C libfoo.git rm libbar
-git -C libfoo.git commit -am 'Remove libbar'
+git -C libfoo.git commit -am 'Remove libbar' --no-verify
rm -f -r libbar.git
git -C libfoo.git submodule add ../libbaz.git libbaz
git -C libfoo.git submodule update --init libbaz
-git -C libfoo.git commit -am 'Add libbaz'
+git -C libfoo.git commit -am 'Add libbaz' --no-verify
git -C libfoo.git tag -f 'ltag'
git -C libfoo.git tag -f -a 'atag' -m 'Move annotated tag'
touch libfoo.git/INSTALL
git -C libfoo.git add INSTALL
-git -C libfoo.git commit -am 'INSTALL'
+git -C libfoo.git commit -am 'INSTALL' --no-verify
diff --git a/tests/common/git/state0/libbar.tar b/tests/common/git/state0/libbar.tar
index ea4d296..4312752 100644
--- a/tests/common/git/state0/libbar.tar
+++ b/tests/common/git/state0/libbar.tar
Binary files differ
diff --git a/tests/common/git/state0/libfoo.tar b/tests/common/git/state0/libfoo.tar
index d30ab31..af2affc 100644
--- a/tests/common/git/state0/libfoo.tar
+++ b/tests/common/git/state0/libfoo.tar
Binary files differ
diff --git a/tests/common/git/state0/libfox.tar b/tests/common/git/state0/libfox.tar
index 50b9840..d955085 100644
--- a/tests/common/git/state0/libfox.tar
+++ b/tests/common/git/state0/libfox.tar
Binary files differ
diff --git a/tests/common/git/state0/links.tar b/tests/common/git/state0/links.tar
index f8a7efd..3376bf7 100644
--- a/tests/common/git/state0/links.tar
+++ b/tests/common/git/state0/links.tar
Binary files differ
diff --git a/tests/common/git/state0/style-basic.tar b/tests/common/git/state0/style-basic.tar
index aa23cf0..5f60782 100644
--- a/tests/common/git/state0/style-basic.tar
+++ b/tests/common/git/state0/style-basic.tar
Binary files differ
diff --git a/tests/common/git/state0/style.tar b/tests/common/git/state0/style.tar
index 9ab3367..d95f00b 100644
--- a/tests/common/git/state0/style.tar
+++ b/tests/common/git/state0/style.tar
Binary files differ
diff --git a/tests/common/git/state1/libbaz.tar b/tests/common/git/state1/libbaz.tar
index 420a984..f10cc12 100644
--- a/tests/common/git/state1/libbaz.tar
+++ b/tests/common/git/state1/libbaz.tar
Binary files differ
diff --git a/tests/common/git/state1/libfoo.tar b/tests/common/git/state1/libfoo.tar
index c827226..4ce25d1 100644
--- a/tests/common/git/state1/libfoo.tar
+++ b/tests/common/git/state1/libfoo.tar
Binary files differ
diff --git a/tests/common/git/state1/libfox.tar b/tests/common/git/state1/libfox.tar
index 95e2e07..7de6ffa 100644
--- a/tests/common/git/state1/libfox.tar
+++ b/tests/common/git/state1/libfox.tar
Binary files differ
diff --git a/tests/common/git/state1/style-basic.tar b/tests/common/git/state1/style-basic.tar
index f59e67e..1693a33 100644
--- a/tests/common/git/state1/style-basic.tar
+++ b/tests/common/git/state1/style-basic.tar
Binary files differ
diff --git a/tests/common/git/state1/style.tar b/tests/common/git/state1/style.tar
index a627bd5..3fbf69e 100644
--- a/tests/common/git/state1/style.tar
+++ b/tests/common/git/state1/style.tar
Binary files differ
diff --git a/tests/common/hello/libhello-1.0.0.tar.gz b/tests/common/hello/libhello-1.0.0.tar.gz
index fa71182..12c2354 100644
--- a/tests/common/hello/libhello-1.0.0.tar.gz
+++ b/tests/common/hello/libhello-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/libhello-1.0.0/manifest b/tests/common/libhello-1.0.0/manifest
index 1fdd50c..1cb2ae0 100644
--- a/tests/common/libhello-1.0.0/manifest
+++ b/tests/common/libhello-1.0.0/manifest
@@ -4,7 +4,7 @@ version: 1.0.0
summary: The "Hello World" example library
license: MIT
tags: c++, hello, world, example
-description: \
+description:\
A simple library that implements the "Hello World" example in C++. Its primary
goal is to show a canonical build2/bpkg project/package.
\
diff --git a/tests/common/linked/t7a/foo-1.0.0.tar.gz b/tests/common/linked/t7a/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..1fffa54
--- /dev/null
+++ b/tests/common/linked/t7a/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbar-1.0.0.tar.gz b/tests/common/linked/t7a/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..9b0e0f6
--- /dev/null
+++ b/tests/common/linked/t7a/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbaz-1.0.0.tar.gz b/tests/common/linked/t7a/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..3cdd0b4
--- /dev/null
+++ b/tests/common/linked/t7a/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbix-1.0.0.tar.gz b/tests/common/linked/t7a/libbix-1.0.0.tar.gz
new file mode 100644
index 0000000..67dd873
--- /dev/null
+++ b/tests/common/linked/t7a/libbix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbiz-1.0.0.tar.gz b/tests/common/linked/t7a/libbiz-1.0.0.tar.gz
new file mode 100644
index 0000000..30f7ba6
--- /dev/null
+++ b/tests/common/linked/t7a/libbiz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbox-1.0.0.tar.gz b/tests/common/linked/t7a/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..cea6f6a
--- /dev/null
+++ b/tests/common/linked/t7a/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbuild2-bar-1.0.0.tar.gz b/tests/common/linked/t7a/libbuild2-bar-1.0.0.tar.gz
new file mode 100644
index 0000000..5f0b592
--- /dev/null
+++ b/tests/common/linked/t7a/libbuild2-bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbuild2-foo-1.0.0.tar.gz b/tests/common/linked/t7a/libbuild2-foo-1.0.0.tar.gz
new file mode 100644
index 0000000..5d900ab
--- /dev/null
+++ b/tests/common/linked/t7a/libbuild2-foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libbuz-1.0.0.tar.gz b/tests/common/linked/t7a/libbuz-1.0.0.tar.gz
new file mode 100644
index 0000000..c3344b6
--- /dev/null
+++ b/tests/common/linked/t7a/libbuz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libfax-1.0.0.tar.gz b/tests/common/linked/t7a/libfax-1.0.0.tar.gz
new file mode 100644
index 0000000..ffb7257
--- /dev/null
+++ b/tests/common/linked/t7a/libfax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/libfix-1.0.0.tar.gz b/tests/common/linked/t7a/libfix-1.0.0.tar.gz
new file mode 100644
index 0000000..98d3499
--- /dev/null
+++ b/tests/common/linked/t7a/libfix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7a/repositories.manifest b/tests/common/linked/t7a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/linked/t7a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/linked/t7b/foo-1.1.0.tar.gz b/tests/common/linked/t7b/foo-1.1.0.tar.gz
new file mode 100644
index 0000000..ff8dc1b
--- /dev/null
+++ b/tests/common/linked/t7b/foo-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7b/libbar-1.1.0.tar.gz b/tests/common/linked/t7b/libbar-1.1.0.tar.gz
new file mode 100644
index 0000000..5d79594
--- /dev/null
+++ b/tests/common/linked/t7b/libbar-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7b/libbaz-1.1.0.tar.gz b/tests/common/linked/t7b/libbaz-1.1.0.tar.gz
new file mode 100644
index 0000000..ec28e55
--- /dev/null
+++ b/tests/common/linked/t7b/libbaz-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7b/libbox-1.1.0.tar.gz b/tests/common/linked/t7b/libbox-1.1.0.tar.gz
new file mode 100644
index 0000000..4365901
--- /dev/null
+++ b/tests/common/linked/t7b/libbox-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/linked/t7b/repositories.manifest b/tests/common/linked/t7b/repositories.manifest
new file mode 100644
index 0000000..aed60ed
--- /dev/null
+++ b/tests/common/linked/t7b/repositories.manifest
@@ -0,0 +1,4 @@
+: 1
+location: ../t7a
+role: complement
+:
diff --git a/tests/common/prereq-cycle/extra/libbar-1.1.0+1.tar.gz b/tests/common/prereq-cycle/extra/libbar-1.1.0+1.tar.gz
index 890e9e2..9115154 100644
--- a/tests/common/prereq-cycle/extra/libbar-1.1.0+1.tar.gz
+++ b/tests/common/prereq-cycle/extra/libbar-1.1.0+1.tar.gz
Binary files differ
diff --git a/tests/common/prereq-cycle/math/libbar-1.0.0.tar.gz b/tests/common/prereq-cycle/math/libbar-1.0.0.tar.gz
index 97e6e32..259aeb0 100644
--- a/tests/common/prereq-cycle/math/libbar-1.0.0.tar.gz
+++ b/tests/common/prereq-cycle/math/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/prereq-cycle/stable/libfoo-1.0.0.tar.gz b/tests/common/prereq-cycle/stable/libfoo-1.0.0.tar.gz
index 5e7fa17..30dbd01 100644
--- a/tests/common/prereq-cycle/stable/libfoo-1.0.0.tar.gz
+++ b/tests/common/prereq-cycle/stable/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbar-0.1.0.tar.gz b/tests/common/satisfy/libbar-0.1.0.tar.gz
new file mode 100644
index 0000000..f072c11
--- /dev/null
+++ b/tests/common/satisfy/libbar-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbar-1.0.0.tar.gz b/tests/common/satisfy/libbar-1.0.0.tar.gz
index 5dc3a9b..00220d7 100644
--- a/tests/common/satisfy/libbar-1.0.0.tar.gz
+++ b/tests/common/satisfy/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbar-1.1.0.tar.gz b/tests/common/satisfy/libbar-1.1.0.tar.gz
index 16a8b47..b3dcf28 100644
--- a/tests/common/satisfy/libbar-1.1.0.tar.gz
+++ b/tests/common/satisfy/libbar-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbar-1.2.0.tar.gz b/tests/common/satisfy/libbar-1.2.0.tar.gz
index 4572395..51d997f 100644
--- a/tests/common/satisfy/libbar-1.2.0.tar.gz
+++ b/tests/common/satisfy/libbar-1.2.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbar-2.1.0.tar.gz b/tests/common/satisfy/libbar-2.1.0.tar.gz
new file mode 100644
index 0000000..0ff50c5
--- /dev/null
+++ b/tests/common/satisfy/libbar-2.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbax-1.0.0.tar.gz b/tests/common/satisfy/libbax-1.0.0.tar.gz
new file mode 100644
index 0000000..7f4dfd0
--- /dev/null
+++ b/tests/common/satisfy/libbax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbax-2.0.0.tar.gz b/tests/common/satisfy/libbax-2.0.0.tar.gz
new file mode 100644
index 0000000..f130da0
--- /dev/null
+++ b/tests/common/satisfy/libbax-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbaz-1.1.0.tar.gz b/tests/common/satisfy/libbaz-1.1.0.tar.gz
index 1aa72a8..5838bf7 100644
--- a/tests/common/satisfy/libbaz-1.1.0.tar.gz
+++ b/tests/common/satisfy/libbaz-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbaz-1.2.0.tar.gz b/tests/common/satisfy/libbaz-1.2.0.tar.gz
new file mode 100644
index 0000000..3d07d88
--- /dev/null
+++ b/tests/common/satisfy/libbaz-1.2.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbaz-2.0.0.tar.gz b/tests/common/satisfy/libbaz-2.0.0.tar.gz
new file mode 100644
index 0000000..dec93bf
--- /dev/null
+++ b/tests/common/satisfy/libbaz-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbaz-2.1.0.tar.gz b/tests/common/satisfy/libbaz-2.1.0.tar.gz
new file mode 100644
index 0000000..4133df8
--- /dev/null
+++ b/tests/common/satisfy/libbaz-2.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbix-1.0.0.tar.gz b/tests/common/satisfy/libbix-1.0.0.tar.gz
new file mode 100644
index 0000000..9dfe541
--- /dev/null
+++ b/tests/common/satisfy/libbix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbix-2.0.0.tar.gz b/tests/common/satisfy/libbix-2.0.0.tar.gz
new file mode 100644
index 0000000..ca7bd95
--- /dev/null
+++ b/tests/common/satisfy/libbix-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbox-1.0.0.tar.gz b/tests/common/satisfy/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..90227f7
--- /dev/null
+++ b/tests/common/satisfy/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbox-1.2.0.tar.gz b/tests/common/satisfy/libbox-1.2.0.tar.gz
new file mode 100644
index 0000000..1f8675a
--- /dev/null
+++ b/tests/common/satisfy/libbox-1.2.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbox-2.0.0.tar.gz b/tests/common/satisfy/libbox-2.0.0.tar.gz
new file mode 100644
index 0000000..15d457f
--- /dev/null
+++ b/tests/common/satisfy/libbox-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libbux-1.0.0.tar.gz b/tests/common/satisfy/libbux-1.0.0.tar.gz
new file mode 100644
index 0000000..51960f9
--- /dev/null
+++ b/tests/common/satisfy/libbux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfix-1.0.0.tar.gz b/tests/common/satisfy/libfix-1.0.0.tar.gz
new file mode 100644
index 0000000..ed2d78f
--- /dev/null
+++ b/tests/common/satisfy/libfix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-0.1.0.tar.gz b/tests/common/satisfy/libfoo-0.1.0.tar.gz
new file mode 100644
index 0000000..b1c1ff2
--- /dev/null
+++ b/tests/common/satisfy/libfoo-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.0.0.tar.gz b/tests/common/satisfy/libfoo-1.0.0.tar.gz
index 41da9a0..be052c2 100644
--- a/tests/common/satisfy/libfoo-1.0.0.tar.gz
+++ b/tests/common/satisfy/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.1.0+1.tar.gz b/tests/common/satisfy/libfoo-1.1.0+1.tar.gz
index 8cc49aa..3eb8670 100644
--- a/tests/common/satisfy/libfoo-1.1.0+1.tar.gz
+++ b/tests/common/satisfy/libfoo-1.1.0+1.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.1.0+2.tar.gz b/tests/common/satisfy/libfoo-1.1.0+2.tar.gz
new file mode 100644
index 0000000..1ffeaea
--- /dev/null
+++ b/tests/common/satisfy/libfoo-1.1.0+2.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.1.0+3.tar.gz b/tests/common/satisfy/libfoo-1.1.0+3.tar.gz
new file mode 100644
index 0000000..8892b7b
--- /dev/null
+++ b/tests/common/satisfy/libfoo-1.1.0+3.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.1.0.tar.gz b/tests/common/satisfy/libfoo-1.1.0.tar.gz
index e03481f..2b95877 100644
--- a/tests/common/satisfy/libfoo-1.1.0.tar.gz
+++ b/tests/common/satisfy/libfoo-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-1.1.1.tar.gz b/tests/common/satisfy/libfoo-1.1.1.tar.gz
new file mode 100644
index 0000000..2e3a1f8
--- /dev/null
+++ b/tests/common/satisfy/libfoo-1.1.1.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-2.0.0.tar.gz b/tests/common/satisfy/libfoo-2.0.0.tar.gz
new file mode 100644
index 0000000..fd8eeb3
--- /dev/null
+++ b/tests/common/satisfy/libfoo-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfoo-3.0.0.tar.gz b/tests/common/satisfy/libfoo-3.0.0.tar.gz
new file mode 100644
index 0000000..3ef4fdf
--- /dev/null
+++ b/tests/common/satisfy/libfoo-3.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfox-1.1.0.tar.gz b/tests/common/satisfy/libfox-1.1.0.tar.gz
new file mode 100644
index 0000000..c626d72
--- /dev/null
+++ b/tests/common/satisfy/libfox-1.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfox-2.0.0.tar.gz b/tests/common/satisfy/libfox-2.0.0.tar.gz
new file mode 100644
index 0000000..1297cda
--- /dev/null
+++ b/tests/common/satisfy/libfox-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfox-2.1.0.tar.gz b/tests/common/satisfy/libfox-2.1.0.tar.gz
new file mode 100644
index 0000000..60a4cce
--- /dev/null
+++ b/tests/common/satisfy/libfox-2.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/libfox-3.0.0.tar.gz b/tests/common/satisfy/libfox-3.0.0.tar.gz
new file mode 100644
index 0000000..0bc246e
--- /dev/null
+++ b/tests/common/satisfy/libfox-3.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libbar-baz-1.0.0.tar.gz b/tests/common/satisfy/t10/libbar-baz-1.0.0.tar.gz
new file mode 100644
index 0000000..9075ee4
--- /dev/null
+++ b/tests/common/satisfy/t10/libbar-baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libbar-foo-1.0.0.tar.gz b/tests/common/satisfy/t10/libbar-foo-1.0.0.tar.gz
new file mode 100644
index 0000000..2dd5e69
--- /dev/null
+++ b/tests/common/satisfy/t10/libbar-foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libbar-tests-1.0.0.tar.gz b/tests/common/satisfy/t10/libbar-tests-1.0.0.tar.gz
new file mode 100644
index 0000000..7f587d9
--- /dev/null
+++ b/tests/common/satisfy/t10/libbar-tests-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libfoo-bar-1.0.0.tar.gz b/tests/common/satisfy/t10/libfoo-bar-1.0.0.tar.gz
new file mode 100644
index 0000000..30ae081
--- /dev/null
+++ b/tests/common/satisfy/t10/libfoo-bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libfoo-baz-1.0.0.tar.gz b/tests/common/satisfy/t10/libfoo-baz-1.0.0.tar.gz
new file mode 100644
index 0000000..1263394
--- /dev/null
+++ b/tests/common/satisfy/t10/libfoo-baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/libfoo-tests-1.0.0.tar.gz b/tests/common/satisfy/t10/libfoo-tests-1.0.0.tar.gz
new file mode 100644
index 0000000..bc58a14
--- /dev/null
+++ b/tests/common/satisfy/t10/libfoo-tests-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t10/repositories.manifest b/tests/common/satisfy/t10/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t10/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t12a/libbar-0.1.0.tar.gz b/tests/common/satisfy/t12a/libbar-0.1.0.tar.gz
new file mode 100644
index 0000000..10c7f29
--- /dev/null
+++ b/tests/common/satisfy/t12a/libbar-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12a/libbaz-1.0.0.tar.gz b/tests/common/satisfy/t12a/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..1d498b8
--- /dev/null
+++ b/tests/common/satisfy/t12a/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12a/repositories.manifest b/tests/common/satisfy/t12a/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/satisfy/t12a/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/satisfy/t12b/bar-1.0.0.tar.gz b/tests/common/satisfy/t12b/bar-1.0.0.tar.gz
new file mode 100644
index 0000000..8999e1a
--- /dev/null
+++ b/tests/common/satisfy/t12b/bar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/baz-0.1.0.tar.gz b/tests/common/satisfy/t12b/baz-0.1.0.tar.gz
new file mode 100644
index 0000000..2676c52
--- /dev/null
+++ b/tests/common/satisfy/t12b/baz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/baz-1.0.0.tar.gz b/tests/common/satisfy/t12b/baz-1.0.0.tar.gz
new file mode 100644
index 0000000..1aec461
--- /dev/null
+++ b/tests/common/satisfy/t12b/baz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/foo-0.1.0.tar.gz b/tests/common/satisfy/t12b/foo-0.1.0.tar.gz
new file mode 100644
index 0000000..a282f20
--- /dev/null
+++ b/tests/common/satisfy/t12b/foo-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/foo-1.0.0.tar.gz b/tests/common/satisfy/t12b/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..4c66d3d
--- /dev/null
+++ b/tests/common/satisfy/t12b/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/libbar-1.0.0.tar.gz b/tests/common/satisfy/t12b/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..c0fe278
--- /dev/null
+++ b/tests/common/satisfy/t12b/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/libbaz-0.1.0.tar.gz b/tests/common/satisfy/t12b/libbaz-0.1.0.tar.gz
new file mode 100644
index 0000000..73c0edb
--- /dev/null
+++ b/tests/common/satisfy/t12b/libbaz-0.1.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t12b/repositories.manifest b/tests/common/satisfy/t12b/repositories.manifest
new file mode 100644
index 0000000..6387e01
--- /dev/null
+++ b/tests/common/satisfy/t12b/repositories.manifest
@@ -0,0 +1,4 @@
+: 1
+:
+location: ../t12a
+role: prerequisite
diff --git a/tests/common/satisfy/t14a/libfoo-1.0.0.tar.gz b/tests/common/satisfy/t14a/libfoo-1.0.0.tar.gz
new file mode 120000
index 0000000..32e5a3c
--- /dev/null
+++ b/tests/common/satisfy/t14a/libfoo-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14a/repositories.manifest b/tests/common/satisfy/t14a/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14a/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14b/libfoo-1.1.0.tar.gz b/tests/common/satisfy/t14b/libfoo-1.1.0.tar.gz
new file mode 120000
index 0000000..c004b2a
--- /dev/null
+++ b/tests/common/satisfy/t14b/libfoo-1.1.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14b/repositories.manifest b/tests/common/satisfy/t14b/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14b/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14c/libfoo-1.1.0+1.tar.gz b/tests/common/satisfy/t14c/libfoo-1.1.0+1.tar.gz
new file mode 120000
index 0000000..ca9c01a
--- /dev/null
+++ b/tests/common/satisfy/t14c/libfoo-1.1.0+1.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.1.0+1.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14c/repositories.manifest b/tests/common/satisfy/t14c/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14c/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14d/libfoo-1.1.0+2.tar.gz b/tests/common/satisfy/t14d/libfoo-1.1.0+2.tar.gz
new file mode 120000
index 0000000..a89d2cc
--- /dev/null
+++ b/tests/common/satisfy/t14d/libfoo-1.1.0+2.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.1.0+2.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14d/repositories.manifest b/tests/common/satisfy/t14d/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14d/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14e/libfoo-1.1.0+3.tar.gz b/tests/common/satisfy/t14e/libfoo-1.1.0+3.tar.gz
new file mode 120000
index 0000000..616029d
--- /dev/null
+++ b/tests/common/satisfy/t14e/libfoo-1.1.0+3.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.1.0+3.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14e/repositories.manifest b/tests/common/satisfy/t14e/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14e/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14f/libfoo-1.1.1.tar.gz b/tests/common/satisfy/t14f/libfoo-1.1.1.tar.gz
new file mode 120000
index 0000000..b9ba788
--- /dev/null
+++ b/tests/common/satisfy/t14f/libfoo-1.1.1.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.1.1.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14f/repositories.manifest b/tests/common/satisfy/t14f/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14f/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t14i/libfoo-1.2.0.tar.gz b/tests/common/satisfy/t14i/libfoo-1.2.0.tar.gz
new file mode 120000
index 0000000..55398c5
--- /dev/null
+++ b/tests/common/satisfy/t14i/libfoo-1.2.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t14i/repositories.manifest b/tests/common/satisfy/t14i/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t14i/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/common/satisfy/t2/libfoo-0.1.0.tar.gz b/tests/common/satisfy/t2/libfoo-0.1.0.tar.gz
new file mode 120000
index 0000000..1e2ede1
--- /dev/null
+++ b/tests/common/satisfy/t2/libfoo-0.1.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-0.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbar-1.2.0.tar.gz b/tests/common/satisfy/t4f/libbar-1.2.0.tar.gz
new file mode 120000
index 0000000..b4a7773
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbar-1.2.0.tar.gz
@@ -0,0 +1 @@
+../libbar-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbar-2.1.0.tar.gz b/tests/common/satisfy/t4f/libbar-2.1.0.tar.gz
new file mode 120000
index 0000000..0df079a
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbar-2.1.0.tar.gz
@@ -0,0 +1 @@
+../libbar-2.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbax-1.0.0.tar.gz b/tests/common/satisfy/t4f/libbax-1.0.0.tar.gz
new file mode 120000
index 0000000..137b938
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbax-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libbax-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbax-2.0.0.tar.gz b/tests/common/satisfy/t4f/libbax-2.0.0.tar.gz
new file mode 120000
index 0000000..465832f
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbax-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libbax-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbix-1.0.0.tar.gz b/tests/common/satisfy/t4f/libbix-1.0.0.tar.gz
new file mode 120000
index 0000000..acf87bf
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbix-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libbix-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbix-2.0.0.tar.gz b/tests/common/satisfy/t4f/libbix-2.0.0.tar.gz
new file mode 120000
index 0000000..26683f9
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbix-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libbix-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbox-1.0.0.tar.gz b/tests/common/satisfy/t4f/libbox-1.0.0.tar.gz
new file mode 120000
index 0000000..3bc2a0c
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbox-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libbox-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbox-2.0.0.tar.gz b/tests/common/satisfy/t4f/libbox-2.0.0.tar.gz
new file mode 120000
index 0000000..1ec50e6
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbox-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libbox-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libbux-1.0.0.tar.gz b/tests/common/satisfy/t4f/libbux-1.0.0.tar.gz
new file mode 120000
index 0000000..8718789
--- /dev/null
+++ b/tests/common/satisfy/t4f/libbux-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libbux-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libfix-1.0.0.tar.gz b/tests/common/satisfy/t4f/libfix-1.0.0.tar.gz
new file mode 120000
index 0000000..aad4c49
--- /dev/null
+++ b/tests/common/satisfy/t4f/libfix-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libfix-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libfoo-2.0.0.tar.gz b/tests/common/satisfy/t4f/libfoo-2.0.0.tar.gz
new file mode 120000
index 0000000..406696d
--- /dev/null
+++ b/tests/common/satisfy/t4f/libfoo-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libfox-1.1.0.tar.gz b/tests/common/satisfy/t4f/libfox-1.1.0.tar.gz
new file mode 120000
index 0000000..97bb68b
--- /dev/null
+++ b/tests/common/satisfy/t4f/libfox-1.1.0.tar.gz
@@ -0,0 +1 @@
+../libfox-1.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/libfox-2.0.0.tar.gz b/tests/common/satisfy/t4f/libfox-2.0.0.tar.gz
new file mode 120000
index 0000000..2996971
--- /dev/null
+++ b/tests/common/satisfy/t4f/libfox-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libfox-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4f/repositories.manifest b/tests/common/satisfy/t4f/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/satisfy/t4f/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/satisfy/t4i/libbar-0.1.0.tar.gz b/tests/common/satisfy/t4i/libbar-0.1.0.tar.gz
new file mode 120000
index 0000000..f622e36
--- /dev/null
+++ b/tests/common/satisfy/t4i/libbar-0.1.0.tar.gz
@@ -0,0 +1 @@
+../libbar-0.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4i/libbaz-2.0.0.tar.gz b/tests/common/satisfy/t4i/libbaz-2.0.0.tar.gz
new file mode 120000
index 0000000..8787403
--- /dev/null
+++ b/tests/common/satisfy/t4i/libbaz-2.0.0.tar.gz
@@ -0,0 +1 @@
+../libbaz-2.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4i/repositories.manifest b/tests/common/satisfy/t4i/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/satisfy/t4i/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/satisfy/t4j/libbar-0.1.0.tar.gz b/tests/common/satisfy/t4j/libbar-0.1.0.tar.gz
new file mode 120000
index 0000000..f622e36
--- /dev/null
+++ b/tests/common/satisfy/t4j/libbar-0.1.0.tar.gz
@@ -0,0 +1 @@
+../libbar-0.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libbar-1.2.0.tar.gz b/tests/common/satisfy/t4j/libbar-1.2.0.tar.gz
new file mode 120000
index 0000000..b4a7773
--- /dev/null
+++ b/tests/common/satisfy/t4j/libbar-1.2.0.tar.gz
@@ -0,0 +1 @@
+../libbar-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libbaz-1.2.0.tar.gz b/tests/common/satisfy/t4j/libbaz-1.2.0.tar.gz
new file mode 120000
index 0000000..d43cdcd
--- /dev/null
+++ b/tests/common/satisfy/t4j/libbaz-1.2.0.tar.gz
@@ -0,0 +1 @@
+../libbaz-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libbaz-2.1.0.tar.gz b/tests/common/satisfy/t4j/libbaz-2.1.0.tar.gz
new file mode 120000
index 0000000..11cd8c8
--- /dev/null
+++ b/tests/common/satisfy/t4j/libbaz-2.1.0.tar.gz
@@ -0,0 +1 @@
+../libbaz-2.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libfix-1.0.0.tar.gz b/tests/common/satisfy/t4j/libfix-1.0.0.tar.gz
new file mode 120000
index 0000000..aad4c49
--- /dev/null
+++ b/tests/common/satisfy/t4j/libfix-1.0.0.tar.gz
@@ -0,0 +1 @@
+../libfix-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libfoo-3.0.0.tar.gz b/tests/common/satisfy/t4j/libfoo-3.0.0.tar.gz
new file mode 120000
index 0000000..7678898
--- /dev/null
+++ b/tests/common/satisfy/t4j/libfoo-3.0.0.tar.gz
@@ -0,0 +1 @@
+../libfoo-3.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libfox-0.0.1.tar.gz b/tests/common/satisfy/t4j/libfox-0.0.1.tar.gz
new file mode 120000
index 0000000..674ac04
--- /dev/null
+++ b/tests/common/satisfy/t4j/libfox-0.0.1.tar.gz
@@ -0,0 +1 @@
+../libfox-0.0.1.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libfox-2.1.0.tar.gz b/tests/common/satisfy/t4j/libfox-2.1.0.tar.gz
new file mode 120000
index 0000000..157a046
--- /dev/null
+++ b/tests/common/satisfy/t4j/libfox-2.1.0.tar.gz
@@ -0,0 +1 @@
+../libfox-2.1.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/libfox-3.0.0.tar.gz b/tests/common/satisfy/t4j/libfox-3.0.0.tar.gz
new file mode 120000
index 0000000..2aef930
--- /dev/null
+++ b/tests/common/satisfy/t4j/libfox-3.0.0.tar.gz
@@ -0,0 +1 @@
+../libfox-3.0.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t4j/repositories.manifest b/tests/common/satisfy/t4j/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/satisfy/t4j/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/satisfy/t4k/libbar-1.0.0.tar.gz b/tests/common/satisfy/t4k/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..4fbd21e
--- /dev/null
+++ b/tests/common/satisfy/t4k/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libbaz-1.0.0.tar.gz b/tests/common/satisfy/t4k/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..dc17b9f
--- /dev/null
+++ b/tests/common/satisfy/t4k/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfax-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfax-1.0.0.tar.gz
new file mode 100644
index 0000000..8145884
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfax-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfax-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfax-2.0.0.tar.gz
new file mode 100644
index 0000000..7ab921f
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfax-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfaz-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfaz-1.0.0.tar.gz
new file mode 100644
index 0000000..3ab004a
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfaz-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfaz-2.0.0.tar.gz
new file mode 100644
index 0000000..1c3d003
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfaz-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfex-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfex-1.0.0.tar.gz
new file mode 100644
index 0000000..f008cc0
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfex-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfex-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfex-2.0.0.tar.gz
new file mode 100644
index 0000000..9cefe9f
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfex-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfix-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfix-1.0.0.tar.gz
new file mode 100644
index 0000000..fe1a6e9
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfix-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfix-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfix-2.0.0.tar.gz
new file mode 100644
index 0000000..7b09f8b
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfix-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfoo-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfoo-1.0.0.tar.gz
new file mode 100644
index 0000000..38bb25b
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfoo-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfoo-2.0.0.tar.gz
new file mode 100644
index 0000000..b868a61
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfoo-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfox-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfox-1.0.0.tar.gz
new file mode 100644
index 0000000..0a1e328
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfox-1.2.0.tar.gz b/tests/common/satisfy/t4k/libfox-1.2.0.tar.gz
new file mode 100644
index 0000000..34efae0
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfox-1.2.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfox-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfox-2.0.0.tar.gz
new file mode 100644
index 0000000..ce73b71
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfox-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfux-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfux-1.0.0.tar.gz
new file mode 100644
index 0000000..983cda7
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfux-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfux-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfux-2.0.0.tar.gz
new file mode 100644
index 0000000..0f63133
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfux-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfuz-1.0.0.tar.gz b/tests/common/satisfy/t4k/libfuz-1.0.0.tar.gz
new file mode 100644
index 0000000..42b2c0e
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfuz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/libfuz-2.0.0.tar.gz b/tests/common/satisfy/t4k/libfuz-2.0.0.tar.gz
new file mode 100644
index 0000000..c31d82a
--- /dev/null
+++ b/tests/common/satisfy/t4k/libfuz-2.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t4k/repositories.manifest b/tests/common/satisfy/t4k/repositories.manifest
new file mode 100644
index 0000000..5b70556
--- /dev/null
+++ b/tests/common/satisfy/t4k/repositories.manifest
@@ -0,0 +1 @@
+: 1
diff --git a/tests/common/satisfy/t5/libbox-1.2.0.tar.gz b/tests/common/satisfy/t5/libbox-1.2.0.tar.gz
new file mode 120000
index 0000000..3d49749
--- /dev/null
+++ b/tests/common/satisfy/t5/libbox-1.2.0.tar.gz
@@ -0,0 +1 @@
+../libbox-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/common/satisfy/t9/foo-1.0.0.tar.gz b/tests/common/satisfy/t9/foo-1.0.0.tar.gz
new file mode 100644
index 0000000..5332259
--- /dev/null
+++ b/tests/common/satisfy/t9/foo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t9/libbar-1.0.0.tar.gz b/tests/common/satisfy/t9/libbar-1.0.0.tar.gz
new file mode 100644
index 0000000..ff5a4f8
--- /dev/null
+++ b/tests/common/satisfy/t9/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t9/libbaz-1.0.0.tar.gz b/tests/common/satisfy/t9/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..ba21c62
--- /dev/null
+++ b/tests/common/satisfy/t9/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t9/libbox-1.0.0.tar.gz b/tests/common/satisfy/t9/libbox-1.0.0.tar.gz
new file mode 100644
index 0000000..7baeed3
--- /dev/null
+++ b/tests/common/satisfy/t9/libbox-1.0.0.tar.gz
Binary files differ
diff --git a/tests/common/satisfy/t9/repositories.manifest b/tests/common/satisfy/t9/repositories.manifest
new file mode 120000
index 0000000..0d4767a
--- /dev/null
+++ b/tests/common/satisfy/t9/repositories.manifest
@@ -0,0 +1 @@
+../repositories.manifest \ No newline at end of file
diff --git a/tests/config.testscript b/tests/config.testscript
index 0bcfb93..442cf36 100644
--- a/tests/config.testscript
+++ b/tests/config.testscript
@@ -9,17 +9,18 @@
# shared between multiple bpkg processes. Also we need to make sure that
# configurations are not cloned while being used by bpkg.
#
-+$cfg_create -d cfg 2>- &cfg/***
+cfg_uuid = "00000000-0000-0000-0000-000000000001"
++$cfg_create -d cfg --uuid $cfg_uuid 2>- &cfg/***
# The most commonly used configuration cloning command that copies it from the
# parent scope working directory.
#
-clone_cfg = cp -pr ../cfg ./
+clone_cfg = [cmdline] cp -pr ../cfg ./
# Clones the original (presumably empty) configuration from the root scope
# working directory.
#
-clone_root_cfg = cp -pr $~/cfg ./
+clone_root_cfg = [cmdline] cp -pr $~/cfg ./
# Setup a test command to use a cloned configuration directory by default.
#
diff --git a/tests/pkg-build.testscript b/tests/pkg-build.testscript
index 50db679..9d19846 100644
--- a/tests/pkg-build.testscript
+++ b/tests/pkg-build.testscript
@@ -21,6 +21,13 @@
# | |-- buildfile
# | `-- manifest
# |
+# |-- libhello-1.0.0
+# | |-- build
+# | | |-- bootstrap.build
+# | | |-- export.build
+# | | `-- root.build
+# | `-- *
+# |
# |-- libfoo-1.1.0.tar.gz
# |-- libfoo-1.2.0.tar.gz
# |
@@ -68,6 +75,7 @@
# |
# |-- t2
# | |-- libbar-1.0.0.tar.gz -> libfoo
+# | |-- libfoo-0.1.0.tar.gz
# | |-- libfoo-1.0.0.tar.gz
# | `-- repositories.manifest
# |
@@ -94,24 +102,425 @@
# | |-- libfox-1.0.0.tar.gz
# | `-- repositories.manifest
# |
-# |-- t4e
+# |-- t4e -> t4a (complement repository)
# | |-- libfoo-1.1.0+1.tar.gz
# | `-- repositories.manifest
# |
+# |-- t4f
+# | |-- libfoo-2.0.0.tar.gz -> libbar == 1.2.0
+# | |-- libbar-1.2.0.tar.gz
+# | |-- libbar-2.1.0.tar.gz -> libbox
+# | |-- libbox-1.0.0.tar.gz -> libbax
+# | |-- libbox-2.0.0.tar.gz -> libbax == 1.0.0
+# | |-- libbax-1.0.0.tar.gz
+# | |-- libbax-2.0.0.tar.gz
+# | |-- libbix-1.0.0.tar.gz -> libbax == 1.0.0
+# | |-- libbix-2.0.0.tar.gz -> libbax == 2.0.0
+# | |-- libbux-1.0.0.tar.gz -> libbix
+# | |-- libfix-1.0.0.tar.gz -> libfox
+# | |-- libfox-1.1.0.tar.gz -> libbar >= 1.0.0
+# | |-- libfox-2.0.0.tar.gz -> libbar >= 2.0.0
+# | `-- repositories.manifest
+# |
+# |-- t4i
+# | |-- libbaz-2.0.0.tar.gz -> libbar < 2.1.0
+# | |-- libbar-0.1.0.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t4j
+# | |-- libbar-0.1.0.tar.gz
+# | |-- libbar-1.2.0.tar.gz
+# | |-- libfoo-3.0.0.tar.gz -> libbar
+# | |-- libfox-0.0.1.tar.gz
+# | |-- libfox-2.1.0.tar.gz -> libbar, libbaz == 1.2.0
+# | |-- libfox-3.0.0.tar.gz -> libbar == 0.1.0, libbaz == 1.2.0
+# | |-- libbaz-1.2.0.tar.gz -> libbar == 1.2.0
+# | |-- libbaz-2.1.0.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t4k
+# | |-- libbar-1.0.0.tar.gz -> libfoo == 1.0.0
+# | |-- libbaz-1.0.0.tar.gz -> libfox
+# | |-- libfoo-1.0.0.tar.gz -> libfix == 1.0.0
+# | |-- libfoo-2.0.0.tar.gz -> libfix == 2.0.0
+# | |-- libfox-1.0.0.tar.gz -> libfux == 1.0.0
+# | |-- libfox-1.2.0.tar.gz -> libfux == 1.0.0, libfex >= 2.0.0
+# | |-- libfox-2.0.0.tar.gz -> libfux == 2.0.0
+# | |-- libfix-1.0.0.tar.gz -> libfax == 1.0.0
+# | |-- libfix-2.0.0.tar.gz -> libfax == 2.0.0
+# | |-- libfux-1.0.0.tar.gz -> libfaz == 1.0.0, libfex == 1.0.0
+# | |-- libfux-2.0.0.tar.gz -> libfaz == 2.0.0, libfex == 2.0.0
+# | |-- libfex-1.0.0.tar.gz -> libfaz == 1.0.0
+# | |-- libfex-2.0.0.tar.gz -> libfaz == 2.0.0
+# | |-- libfax-1.0.0.tar.gz -> libfuz == 1.0.0
+# | |-- libfax-2.0.0.tar.gz -> libfuz == 2.0.0
+# | |-- libfaz-1.0.0.tar.gz -> libfuz == 1.0.0
+# | |-- libfaz-2.0.0.tar.gz -> libfuz == 2.0.0
+# | |-- libfuz-1.0.0.tar.gz
+# | |-- libfuz-2.0.0.tar.gz
+# | `-- repositories.manifest
+# |
# |-- t5
# | |-- libbar-1.2.0.tar.gz
+# | |-- libbox-1.2.0.tar.gz
# | `-- repositories.manifest
# |
# |-- t6
# | |-- libBar-2.0.0.tar.gz
# | `-- repositories.manifest
# |
-# |-- libhello-1.0.0
-# | |-- build
-# | | |-- bootstrap.build
-# | | |-- export.build
-# | | `-- root.build
-# | `-- *
+# |-- t7a
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- libbuild2-bar-1.0.0.tar.gz
+# | |-- foo-1.0.0.tar.gz -> * libbuild2-bar ^1.0.0, libbaz ^1.0.0
+# | |-- libbuild2-foo-1.0.0.tar.gz -> libbaz ^1.0.0
+# | |-- libbiz-1.0.0.tar.gz -> * libbuild2-foo ^1.0.0, * foo ^1.0.0,
+# | | libbaz ^1.0.0
+# | |-- libbuz-1.0.0.tar.gz -> * libbuild2-foo ^1.0.0, * foo ^1.0.0
+# | |-- libbix-1.0.0.tar.gz -> libbiz ^1.0.0, libbuz ^1.0.0
+# | |-- libbar-1.0.0.tar.gz -> * foo ^1.0.0, libbaz ^1.0.0
+# | |-- libbox-1.0.0.tar.gz -> * foo ^1.0.0, libbaz ^1.0.0
+# | |-- libfax-1.0.0.tar.gz
+# | |-- libfix-1.0.0.tar.gz -> libbar ^1.0.0, libbox ^1.0.0,
+# | | libfax ^1.0.0
+# | `-- repositories.manifest
+# |
+# |-- t7b -> t7a (complement repository)
+# | |-- libbaz-1.1.0.tar.gz
+# | |-- foo-1.1.0.tar.gz -> libbaz ^1.1.0
+# | |-- libbar-1.1.0.tar.gz -> * foo ^1.1.0, libbaz ^1.0.0
+# | |-- libbox-1.1.0.tar.gz -> * foo ^1.0.0
+# | `-- repositories.manifest
+# |
+# |-- t8a
+# | |-- libbar-1.0.0.tar.gz
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- libbaz-1.1.0.tar.gz
+# | |-- libbiz-0.1.0.tar.gz
+# | |-- libbiz-1.0.0.tar.gz
+# | |-- libbox-0.1.0.tar.gz
+# | |-- libbox-0.1.1.tar.gz
+# | |-- libbox-1.0.0.tar.gz
+# | |-- libfoo-1.0.0.tar.gz
+# | |-- libfoo-2.0.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> libbar
+# | |-- baz-1.0.0.tar.gz -> libbaz
+# | |-- bax-0.1.0.tar.gz -> libbox config.bax.backend=libbox
+# | |-- bax-1.0.0.tar.gz -> libbox >= 0.1.1 config.bax.backend=libbox
+# | |-- bix-0.1.0.tar.gz
+# | |-- bix-1.0.0.tar.gz -> bax == 0.1.0
+# | |-- box-1.0.0.tar.gz -> libbiz ^1.0.0 config.box.backend=libbiz |
+# | | libbox >= 0.1.1 config.box.backend=libbox,
+# | | libbaz
+# | |-- bux-1.0.0.tar.gz -> bix
+# | |-- dax-1.0.0.tar.gz -> libbar ? ($config.dax.extras)
+# | | libbaz
+# | |-- dix-0.1.0.tar.gz
+# | |-- dix-1.0.0.tar.gz -> dax require {config.dax.extras=true}
+# | |-- dox-1.0.0.tar.gz -> dax
+# | |-- dux-1.0.0.tar.gz -> dix
+# | |-- fax-1.0.0.tar.gz -> libbar ^1.0.0 ? ($cxx.target.class == 'windows') config.fax.backend=libbar |
+# | | libbaz ^1.0.0 ? ($cxx.target.class != 'windows') config.fax.backend=libbaz,
+# | | libbiz ? ($config.fax.libbiz) config.fax.extras='[b\i$z]',
+# | | libbox ? ($config.fax.libbox && $config.fax.backend == libbaz && $config.fax.extras == '[b\i$z]')
+# | |-- fix-1.0.0.tar.gz -> libbaz ^1.0.0 | libbar ^1.0.0
+# | |-- foo-1.0.0.tar.gz -> {libbar libbaz} ^1.0.0
+# | |-- fox-1.0.0.tar.gz -> libbar ^1.0.0 | libbaz ^1.0.0
+# | |-- fux-1.0.0.tar.gz -> libbiz ? (!$config.fux.libbiz_old) | libbiz ^0.1.0 ? ($config.fux.libbiz_old)
+# | |-- fuz-1.0.0.tar.gz -> libfoo
+# | |-- foz-1.0.0.tar.gz -> fuz
+# | |-- tax-1.0.0.tar.gz -> libfoo == 1.0.0 | libfoo == 2.0.0
+# | |-- tex-1.0.0.tar.gz -> libfoo prefer{} accept(true) reflect {...}
+# | |-- tix-1.0.0.tar.gz -> libfoo >= 2.0.0 reflect {...} | libfoo >= 1.0.0 reflect {...}
+# | |-- tox-1.0.0.tar.gz -> libfoo >= 2.0.0 prefer{} accept(true) reflect {...} | libfoo >= 1.0.0 reflect {...}
+# | |-- tpx-1.0.0.tar.gz -> libfoo >= 2.0.0 prefer{...} accept(true) reflect {...} | libfoo >= 1.0.0 prefer{...} accept(true) reflect {...}
+# | |-- tux-1.0.0.tar.gz -> libfoo prefer{config.libfoo.protocol = "1"} accept(true),
+# | | libbox ? (config.libfoo.protocol == "1")
+# | |-- twx-1.0.0.tar.gz -> libbiz,
+# | | libfoo prefer{config.libfoo.protocol = "1"} accept(true),
+# | | libbox ? (config.libfoo.protocol == "1")
+# | |-- tvx-1.0.0.tar.gz -> libfoo >= 2.0.0 reflect {...} | libfoo >= 1.0.0 reflect {...},
+# | | libfox prefer{config.libfox.level = $config.tvx.reflect} accept(true)
+# | `-- repositories.manifest
+# |
+# |-- t9
+# | |-- libbar-1.0.0.tar.gz
+# | |-- libbaz-1.0.0.tar.gz -> libbar ^1.0.0
+# | |-- libbox-1.0.0.tar.gz -> libbar ^1.0.0
+# | |-- foo-1.0.0.tar.gz -> libbaz, libbox
+# | `-- repositories.manifest
+# |
+# |-- t10
+# | |-- libfoo-bar-1.0.0.tar.gz (tests) -> libfoo-tests
+# | |-- libfoo-baz-1.0.0.tar.gz (tests) -> libfoo-tests
+# | |-- libfoo-tests-1.0.0.tar.gz
+# | |-- libbar-foo-1.0.0.tar.gz (tests) -> libbar-tests
+# | |-- libbar-baz-1.0.0.tar.gz (tests) -> libbar-tests
+# | |-- libbar-tests-1.0.0.tar.gz -> ? libbar-foo, ? libbar-baz
+# | `-- repositories.manifest
+# |
+# | NOTE: remember to update
+# | pkg-build/dependency/config-negotiation-order/repo-packages/*
+# | tests if adding any packages to the below repository.
+# |
+# |-- t11a
+# | |-- libfoo-0.1.0.tar.gz
+# | |-- libfoo-1.0.0.tar.gz
+# | |-- libbar-0.1.0.tar.gz
+# | |-- libbar-1.0.0.tar.gz
+# | |-- libbaz-0.1.0.tar.gz
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- libbox-0.1.0.tar.gz
+# | |-- libbox-1.0.0.tar.gz
+# | |-- libbiz-0.1.0.tar.gz
+# | |-- libbiz-1.0.0.tar.gz -> libbar
+# | |-- foo-0.1.0.tar.gz -> libfoo {require {config.libfoo.extras=true}}
+# | |-- foo-0.2.0.tar.gz -> libfoo {require {config.libfoo.extras=true}} | libbar
+# | |-- foo-1.0.0.tar.gz -> libfoo {require {config.libfoo.extras=true} reflect {...}}
+# | |-- fox-0.1.0.tar.gz -> libfoo {prefer {config.libfoo.extras=true} accept (false)} |
+# | | libbar
+# | |-- fox-0.2.0.tar.gz -> libfoo {prefer {config.libfoo.extras=false} accept (!$config.libfoo.extras)} |
+# | | libfoo {prefer {config.libfoo.extras=true} accept (true)} |
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- fox-1.0.0.tar.gz -> libfoo {require {config.libfoo.extras=true}}
+# | |-- fux-0.1.0.tar.gz -> libfoo ? ($config.fux.extras=true)
+# | |-- fux-0.1.1.tar.gz -> libfoo ? ($config.fux.extras=true)
+# | |-- fux-0.2.0.tar.gz -> libfoo {enable($config.fux.extras=true) require {config.libfoo.extras=true}}
+# | |-- fux-1.0.0.tar.gz -> libfoo
+# | |-- fix-0.1.0.tar.gz -> foo == 0.1.0
+# | |-- fix-1.0.0.tar.gz -> foo {require {config.foo.extras=true}}
+# | |-- fex-0.1.0.tar.gz -> fux {require {config.fux.extras=true}}
+# | |-- fex-1.0.0.tar.gz -> foo, libfoo {require {config.libfoo.extras=true}}
+# | |-- bar-0.1.0.tar.gz -> libbar == 0.1.0 {require {config.libbar.extras=true}}
+# | |-- bar-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}}
+# | |-- baz-0.1.0.tar.gz -> {libbar libfoo} == 0.1.0 {require {config.libbar.extras=true config.libfoo.extras=true}}
+# | |-- baz-1.0.0.tar.gz -> {libbar libfoo} {require {config.libbar.extras=true config.libfoo.extras=true}}
+# | |-- bac-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | libbaz {require {config.libbaz.extras=true}},
+# | | libfoo {require {config.libfoo.extras=true}}
+# | |-- bat-1.0.0.tar.gz -> libbaz {require {config.libbaz.extras=true}}
+# | |-- bas-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | bus {require {config.bus.extras=true}}
+# | |-- bus-0.1.0.tar.gz -> foo {require {config.foo.extras=true} reflect {...}}
+# | |-- bus-1.0.0.tar.gz -> libaz {require {config.libbaz.extras=true}},
+# | | foo {require {config.foo.extras=true}}
+# | |-- box-0.1.0.tar.gz -> libbox == 0.1.0 {require {config.libbox.extras=true}}
+# | |-- box-0.2.0.tar.gz -> libbox {require {config.libbox.extras=true}}
+# | |-- box-1.0.0.tar.gz -> {libbar libfoo} {require {config.libbar.extras=true config.libfoo.extras=true}} |
+# | | libbox
+# | |-- bax-0.1.0.tar.gz -> {libbox libbar} {require {config.libbox.extras=true}}
+# | |-- bax-1.0.0.tar.gz -> libfoo {require {config.libfoo.extras=true} reflect {...}},
+# | | {libbox libbar} {require {config.libbox.extras=true config.libbar.extras=true}}
+# | |-- bux-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}}
+# | |-- bix-1.0.0.tar.gz -> {libbar bar} {require {config.libbar.extras=true config.bar.extras=true}},
+# | | bux
+# | |-- bex-1.0.0.tar.gz -> libbar
+# | |-- boo-1.0.0.tar.gz -> libbar | libfoo {require {config.libfoo.extras=true}} | libbox
+# | |-- biz-0.1.0.tar.gz -> libbiz == 0.1.0
+# | |-- biz-1.0.0.tar.gz -> boo {require {config.boo.extras=true}}
+# | |-- buz-1.0.0.tar.gz -> bux {require {config.bux.extras=true}}
+# | |-- buc-1.0.0.tar.gz -> libfoo {require {config.libfoo.extras=true}},
+# | | bux {require {config.bux.extras=true}}
+# | |-- tax-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | libfoo
+# | |-- tex-0.1.0.tar.gz -> libfoo {require {config.libfoo.extras=true}}
+# | |-- tex-0.2.0.tar.gz -> libbar,
+# | | libfoo {require {config.libfoo.extras=true}}
+# | |-- tex-0.3.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | libfoo {require {config.libfoo.extras=true}}
+# | |-- tex-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | libfoo {require {config.libfoo.extras=true} reflect {...}}
+# | |-- tix-0.1.0.tar.gz
+# | |-- tix-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | tex {require {config.tex.extras=true}}
+# | |-- tiz-1.0.0.tar.gz -> tex {require {config.tex.extras=true}, reflect {...}},
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- toz-0.1.0.tar.gz
+# | |-- toz-0.2.0.tar.gz -> libfoo {require {config.libfoo.extras=true}},
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- toz-1.0.0.tar.gz -> libbaz {require {config.libbaz.extras=true}},
+# | | libfoo {require {config.libfoo.extras=true}},
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- tez-0.1.0.tar.gz -> libbox {require {config.libbox.extras=true}},
+# | | toz == 0.1.0 {require {config.toz.extras=true}}
+# | |-- tez-1.0.0.tar.gz -> libbox {require {config.libbox.extras=true}},
+# | | toz == 0.1.0 {require {config.toz.extras=true}},
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- tuz-1.0.0.tar.gz -> toz {require {config.toz.extras=true}}
+# | |-- tux-1.0.0.tar.gz -> libbox {require {config.libbox.extras=true}},
+# | | tix == 0.1.0
+# | |-- tvz-0.1.0.tar.gz -> toz == 0.2.0 {require {config.toz.extras=true}},
+# | | bax,
+# | | libfoo {require {config.libfoo.network=true}}
+# | |-- tvz-1.0.0.tar.gz -> toz == 0.2.0 {require {config.toz.extras=true}}
+# | |-- dex-1.0.0.tar.gz -> bar {require {config.bar.extras=true}},
+# | | libfoo {require {config.libfoo.extras=true}}
+# | |-- dix-1.0.0.tar.gz -> libbar {require {config.libbar.extras=true}},
+# | | libbox {require {config.libbox.extras=true}},
+# | | dox {require {config.dox.extras=true}}
+# | |-- diz-1.0.0.tar.gz -> dox {require {config.dox.extras=true}},
+# | | libbox {require {config.libbox.extras=true}},
+# | | libbar {require {config.libbar.extras=true}}
+# | |-- dox-1.0.0.tar.gz -> dex {require {config.dex.extras=true}}
+# | `-- repositories.manifest
+# |
+# |-- t12a
+# | |-- libbaz-1.0.0.tar.gz
+# | |-- libbar-0.1.0.tar.gz -> libbaz
+# | `-- repositories.manifest
+# |
+# |-- t12b -> t12a (prerequisite repository)
+# | |-- libbaz-0.1.0.tar.gz
+# | |-- libbar-1.0.0.tar.gz -> libbaz == 0.1.0
+# | |-- foo-0.1.0.tar.gz
+# | |-- foo-1.0.0.tar.gz -> libbar
+# | |-- bar-1.0.0.tar.gz -> libbar == 0.1.0
+# | |-- baz-0.1.0.tar.gz -> libbaz
+# | |-- baz-1.0.0.tar.gz -> libbaz == 1.0.0
+# | `-- repositories.manifest
+# |
+# |-- t13a
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- biz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- box-1.0.0.tar.gz -> liba {prefer {} accept (true) reflect {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13b
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- biz-1.0.0.tar.gz -> liba {require {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13c
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}}
+# | |-- baz-1.0.0.tar.gz -> liba {require {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13d
+# | |-- liba-0.1.0.tar.gz
+# | |-- libb-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}},
+# | | libb ? (...)
+# | |-- baz-1.0.0.tar.gz -> bar, liba {require {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13e
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)},
+# | | bar ? (...),
+# | | biz
+# | |-- biz-1.0.0.tar.gz -> liba {require {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13f
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | `-- repositories.manifest
+# |
+# |-- t13g
+# | |-- liba-0.1.0.tar.gz
+# | |-- libb-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}},
+# | | libb {prefer {...} accept (...) reflect {...}}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}},
+# | | libb ? (...)
+# | |-- biz-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}},
+# | | libb {prefer {...} accept (...) reflect {...}}
+# | |-- box-1.0.0.tar.gz -> liba {prefer {...} accept (...) reflect {...}},
+# | | libb {prefer {...} accept (...) reflect {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13h
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | `-- repositories.manifest
+# |
+# |-- t13i
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {require {...} reflect {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13j
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {require {...}}
+# | |-- biz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | `-- repositories.manifest
+# |
+# |-- t13k
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)}
+# | `-- repositories.manifest
+# |
+# |-- t13l
+# | |-- liba-0.1.0.tar.gz
+# | |-- libb-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {require {...}},
+# | | libb ? (...)
+# | |-- baz-1.0.0.tar.gz -> liba {prefer {...} accept (...)},
+# | | libb ? (...)
+# | `-- repositories.manifest
+# |
+# |-- t13m
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {require {...}}
+# | |-- baz-1.0.0.tar.gz -> liba {require {...}},
+# | | bar { enable (...) reflect {...}}
+# | |-- biz-1.0.0.tar.gz -> liba {require {...}}
+# | |-- bix-1.0.0.tar.gz -> liba {require {...}}
+# | |-- box-1.0.0.tar.gz -> liba {require {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13n
+# | |-- liba-0.1.0.tar.gz
+# | |-- libb-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {reflect {...}},
+# | | libb {reflect {...}}
+# | `-- repositories.manifest
+# |
+# |-- t13o
+# | |-- liba-0.1.0.tar.gz
+# | |-- bar-1.0.0.tar.gz -> liba {reflect {...}}
+# | |-- baz-1.0.0.tar.gz -> bar {require {...}}
+# | |-- biz-1.0.0.tar.gz -> bar {prefer {...} accept (...)}
+# | |-- bix-1.0.0.tar.gz -> bar {prefer {...} accept (...)}
+# | `-- repositories.manifest
+# |
+# |-- t14a
+# | |-- libfoo-1.0.0.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14b
+# | |-- libfoo-1.1.0.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14c
+# | |-- libfoo-1.1.0+1.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14d
+# | |-- libfoo-1.1.0+2.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14e
+# | |-- libfoo-1.1.0+3.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14f
+# | |-- libfoo-1.1.1.tar.gz
+# | `-- repositories.manifest
+# |
+# |-- t14i
+# | |-- libfoo-1.2.0.tar.gz
+# | `-- repositories.manifest
# |
# `-- git
# |-- libbar.git -> style-basic.git (prerequisite repository)
@@ -125,20 +534,55 @@ posix = ($cxx.target.class != 'windows')
+if! $remote
rep_create += 2>!
- cp -r $src/t0a $out/t0a && $rep_create $out/t0a &$out/t0a/packages.manifest
- cp -r $src/t0b $out/t0b && $rep_create $out/t0b &$out/t0b/packages.manifest
- cp -r $src/t0c $out/t0c && $rep_create $out/t0c &$out/t0c/packages.manifest
- cp -r $src/t0d $out/t0d && $rep_create $out/t0d &$out/t0d/packages.manifest
- cp -r $src/t1 $out/t1 && $rep_create $out/t1 &$out/t1/packages.manifest
- cp -r $src/t2 $out/t2 && $rep_create $out/t2 &$out/t2/packages.manifest
- cp -r $src/t3 $out/t3 && $rep_create $out/t3 &$out/t3/packages.manifest
- cp -r $src/t4a $out/t4a && $rep_create $out/t4a &$out/t4a/packages.manifest
- cp -r $src/t4b $out/t4b && $rep_create $out/t4b &$out/t4b/packages.manifest
- cp -r $src/t4c $out/t4c && $rep_create $out/t4c &$out/t4c/packages.manifest
- cp -r $src/t4d $out/t4d && $rep_create $out/t4d &$out/t4d/packages.manifest
- cp -r $src/t4e $out/t4e && $rep_create $out/t4e &$out/t4e/packages.manifest
- cp -r $src/t5 $out/t5 && $rep_create $out/t5 &$out/t5/packages.manifest
- cp -r $src/t6 $out/t6 && $rep_create $out/t6 &$out/t6/packages.manifest
+ cp -r $src/t0a $out/t0a && $rep_create $out/t0a &$out/t0a/packages.manifest
+ cp -r $src/t0b $out/t0b && $rep_create $out/t0b &$out/t0b/packages.manifest
+ cp -r $src/t0c $out/t0c && $rep_create $out/t0c &$out/t0c/packages.manifest
+ cp -r $src/t0d $out/t0d && $rep_create $out/t0d &$out/t0d/packages.manifest
+ cp -r $src/t1 $out/t1 && $rep_create $out/t1 &$out/t1/packages.manifest
+ cp -r $src/t2 $out/t2 && $rep_create $out/t2 &$out/t2/packages.manifest
+ cp -r $src/t3 $out/t3 && $rep_create $out/t3 &$out/t3/packages.manifest
+ cp -r $src/t4a $out/t4a && $rep_create $out/t4a &$out/t4a/packages.manifest
+ cp -r $src/t4b $out/t4b && $rep_create $out/t4b &$out/t4b/packages.manifest
+ cp -r $src/t4c $out/t4c && $rep_create $out/t4c &$out/t4c/packages.manifest
+ cp -r $src/t4d $out/t4d && $rep_create $out/t4d &$out/t4d/packages.manifest
+ cp -r $src/t4e $out/t4e && $rep_create $out/t4e &$out/t4e/packages.manifest
+ cp -r $src/t4f $out/t4f && $rep_create $out/t4f &$out/t4f/packages.manifest
+ cp -r $src/t4i $out/t4i && $rep_create $out/t4i &$out/t4i/packages.manifest
+ cp -r $src/t4j $out/t4j && $rep_create $out/t4j &$out/t4j/packages.manifest
+ cp -r $src/t4k $out/t4k && $rep_create $out/t4k &$out/t4k/packages.manifest
+ cp -r $src/t5 $out/t5 && $rep_create $out/t5 &$out/t5/packages.manifest
+ cp -r $src/t6 $out/t6 && $rep_create $out/t6 &$out/t6/packages.manifest
+ cp -r $src/t7a $out/t7a && $rep_create $out/t7a &$out/t7a/packages.manifest
+ cp -r $src/t7b $out/t7b && $rep_create $out/t7b &$out/t7b/packages.manifest
+ cp -r $src/t8a $out/t8a && $rep_create $out/t8a &$out/t8a/packages.manifest
+ cp -r $src/t9 $out/t9 && $rep_create $out/t9 &$out/t9/packages.manifest
+ cp -r $src/t10 $out/t10 && $rep_create $out/t10 &$out/t10/packages.manifest
+ cp -r $src/t11a $out/t11a && $rep_create $out/t11a &$out/t11a/packages.manifest
+ cp -r $src/t12a $out/t12a && $rep_create $out/t12a &$out/t12a/packages.manifest
+ cp -r $src/t12b $out/t12b && $rep_create $out/t12b &$out/t12b/packages.manifest
+ cp -r $src/t13a $out/t13a && $rep_create $out/t13a &$out/t13a/packages.manifest
+ cp -r $src/t13b $out/t13b && $rep_create $out/t13b &$out/t13b/packages.manifest
+ cp -r $src/t13c $out/t13c && $rep_create $out/t13c &$out/t13c/packages.manifest
+ cp -r $src/t13d $out/t13d && $rep_create $out/t13d &$out/t13d/packages.manifest
+ cp -r $src/t13e $out/t13e && $rep_create $out/t13e &$out/t13e/packages.manifest
+ cp -r $src/t13f $out/t13f && $rep_create $out/t13f &$out/t13f/packages.manifest
+ cp -r $src/t13g $out/t13g && $rep_create $out/t13g &$out/t13g/packages.manifest
+ cp -r $src/t13h $out/t13h && $rep_create $out/t13h &$out/t13h/packages.manifest
+ cp -r $src/t13i $out/t13i && $rep_create $out/t13i &$out/t13i/packages.manifest
+ cp -r $src/t13j $out/t13j && $rep_create $out/t13j &$out/t13j/packages.manifest
+ cp -r $src/t13k $out/t13k && $rep_create $out/t13k &$out/t13k/packages.manifest
+ cp -r $src/t13l $out/t13l && $rep_create $out/t13l &$out/t13l/packages.manifest
+ cp -r $src/t13m $out/t13m && $rep_create $out/t13m &$out/t13m/packages.manifest
+ cp -r $src/t13n $out/t13n && $rep_create $out/t13n &$out/t13n/packages.manifest
+ cp -r $src/t13o $out/t13o && $rep_create $out/t13o &$out/t13o/packages.manifest
+ cp -r $src/t14a $out/t14a && $rep_create $out/t14a &$out/t14a/packages.manifest
+ cp -r $src/t14b $out/t14b && $rep_create $out/t14b &$out/t14b/packages.manifest
+ cp -r $src/t14c $out/t14c && $rep_create $out/t14c &$out/t14c/packages.manifest
+ cp -r $src/t14d $out/t14d && $rep_create $out/t14d &$out/t14d/packages.manifest
+ cp -r $src/t14e $out/t14e && $rep_create $out/t14e &$out/t14e/packages.manifest
+ cp -r $src/t14f $out/t14f && $rep_create $out/t14f &$out/t14f/packages.manifest
+ cp -r $src/t14i $out/t14i && $rep_create $out/t14i &$out/t14i/packages.manifest
+ cp -r $src/t15 $out/t15 && $rep_create $out/t15 &$out/t15/packages.manifest --ignore-unknown
# Create git repositories.
#
@@ -147,9 +591,13 @@ posix = ($cxx.target.class != 'windows')
$git_extract $src/git/libbaz.tar &$out_git/state1/***
end
-config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+config_cxx = [cmdline] config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+
+tar = [cmdline] ($posix ? tar : bsdtar)
-pkg_configure += -d cfg $config_cxx 2>!
+cfg_create += 2>!
+cfg_link += 2>!
+pkg_configure += -d cfg 2>!
pkg_disfigure += -d cfg
pkg_drop += -d cfg --yes 2>!
pkg_fetch += -d cfg 2>!
@@ -166,6 +614,10 @@ rep_list += -d cfg
#
test.options += --no-progress
+# Disable the use of the system package manager.
+#
+test.arguments += --sys-no-query
+
: libfoo
:
: Test building different versions of libfoo.
@@ -247,6 +699,41 @@ test.options += --no-progress
info: use 'bpkg rep-add' to add a repository
EOE
+ : mask-repository-not-found
+ :
+ $clone_root_cfg;
+ $* --mask-repository 'https://example.com/1' libfoo 2>>EOE != 0
+ error: repository 'https://example.com/1' cannot be masked: not found
+ EOE
+
+ : mask-repository-empty
+ :
+ $clone_root_cfg;
+ $* --mask-repository '' libfoo 2>>EOE != 0
+ error: repository '' cannot be masked: invalid repository location: empty URL
+ EOE
+
+ : mask-repository-uuid-db-not-found
+ :
+ $clone_root_cfg;
+ $* --mask-repository-uuid '00000000-0000-0000-0000-123456789012=repo' libfoo 2>>/EOE != 0
+ error: configuration repository '00000000-0000-0000-0000-123456789012=repo' cannot be masked: no configuration with uuid 00000000-0000-0000-0000-123456789012 is linked with cfg/
+ EOE
+
+ : mask-repository-uuid-empty
+ :
+ $clone_root_cfg;
+ $* --mask-repository-uuid "$cfg_uuid=" libfoo 2>>EOE != 0
+ error: configuration repository '00000000-0000-0000-0000-000000000001=' cannot be masked: invalid repository location '': empty URL
+ EOE
+
+ : mask-repository-uuid-not-found
+ :
+ $clone_root_cfg;
+ $* --mask-repository-uuid "$cfg_uuid=https://example.com/1" libfoo 2>>EOE != 0
+ error: configuration repository '00000000-0000-0000-0000-000000000001=https://example.com/1' cannot be masked: repository location 'https://example.com/1' not found in configuration 00000000-0000-0000-0000-000000000001
+ EOE
+
: archive
:
$clone_root_cfg;
@@ -404,10 +891,22 @@ test.options += --no-progress
:
$clone_root_cfg;
$* $src/libbar-1.0.0.tar.gz 2>>EOE != 0
- error: unknown dependency libfoo of package libbar
+ error: no package available for dependency libfoo of package libbar
info: while satisfying libbar/1.0.0
EOE
+ : unknown-dependency-config
+ :
+ : As above but with a linked configuration.
+ :
+ $clone_root_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+ $* $src/libbar-1.0.0.tar.gz +{ --config-id 1 } 2>>~%EOE% != 0
+ %error: no package available for dependency libfoo of package libbar \[cfg2.\]%
+ %info: while satisfying libbar/1.0.0 \[cfg2.\]%
+ EOE
+
: t2
:
{
@@ -513,6 +1012,21 @@ test.options += --no-progress
EOO
}
+ : latest-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* "libbar/1.1.0@$rep/t4e" +{ --config-id 1 } --trust-yes >>~%EOO% 2>!
+ %new libfoo/1.1.0\+1 \[cfg2.\] \(required by libbar \[cfg2.\]\)%
+ %new libbar/1.1.0 \[cfg2.\]%
+ EOO
+ }
+
: zero
:
{
@@ -727,7 +1241,7 @@ test.options += --no-progress
:
$clone_cfg;
$* libbaz >>EOO
- new libfoo/1.1.0 (required by libbar libbaz)
+ new libfoo/1.1.0 (required by libbar, libbaz)
new libbar/1.1.0 (required by libbaz)
new libbaz/1.1.0
EOO
@@ -741,19 +1255,146 @@ test.options += --no-progress
new libbaz/1.1.0
EOO
- : unable-satisfy
+ : libbaz-unable-satisfy
:
- $clone_cfg;
- $* libfoo/1.0.0 libbaz 2>>EOE != 0
- error: unable to satisfy constraints on package libfoo
- info: libbar depends on (libfoo == 1.1.0)
- info: command line depends on (libfoo == 1.0.0)
- info: available libfoo/1.1.0
- info: available libfoo/1.0.0
- info: explicitly specify libfoo version to manually satisfy both constraints
- info: while satisfying libbar/1.1.0
- info: while satisfying libbaz/1.1.0
- EOE
+ {
+ +$clone_cfg
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo/1.0.0 libbaz 2>>EOE != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: while satisfying libbaz/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unable-satisfy-reorder
+ :
+ : As above but the packages are specified in a different order on the
+ : command line.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfoo/1.0.0 2>>EOE != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: while satisfying libbaz/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unable-satisfy-dependency
+ :
+ : As above but specify libfoo as a dependency.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz ?libfoo/1.0.0 2>>EOE != 0
+ error: unable to satisfy constraints on package libfoo
+ info: libbaz/1.1.0 depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: while satisfying libbaz/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unable-satisfy-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+ $rep_add -d cfg2 $rep/t4c && $rep_fetch -d cfg2;
+ $* libbaz ?libbar +{ --config-id 1 } libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ % info: libbar/1.1.0 \[cfg2.\] depends on \(libfoo == 1.1.0\)%
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ % info: while satisfying libbar/1.1.0 \[cfg2.\]%
+ info: while satisfying libbaz/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+ }
+
+ : libbar-unable-satisfy
+ :
+ {
+ +$clone_cfg
+ +$rep_add $rep/t4b && $rep_fetch
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo/1.0.0 libbar 2>>EOE != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unable-satisfy-reorder
+ :
+ : As above but the packages are specified in a different order on the
+ : command line.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar libfoo/1.0.0 2>>EOE != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unable-satisfy-dependency
+ :
+ : Similar to the above, but specify libfoo as a dependency.
+ :
+ {
+ $clone_cfg;
+
+ # Add libbaz, so that libfoo package is available from its dependents
+ # (libbaz) repositories.
+ #
+ $* libbar ?libfoo/1.0.0 libbaz 2>>EOE != 0
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/1.0.0 doesn't satisfy libbar/1.1.0
+ EOE
+ }
+ }
: not-available
:
@@ -771,7 +1412,7 @@ test.options += --no-progress
$pkg_fetch -e $src/libfix-0.0.1.tar.gz && $pkg_unpack libfix;
$* libbaz >>EOO;
- upgrade libfoo/1.1.0 (required by libbar libbaz)
+ upgrade libfoo/1.1.0 (required by libbar, libbaz)
new libbar/1.1.0 (required by libbaz)
new libbaz/1.1.0
EOO
@@ -787,7 +1428,7 @@ test.options += --no-progress
$pkg_fetch -e $src/libfoo-1.2.0.tar.gz && $pkg_unpack libfoo;
$* libbaz >>EOO;
- downgrade libfoo/1.1.0 (required by libbar libbaz)
+ downgrade libfoo/1.1.0 (required by libbar, libbaz)
new libbar/1.1.0 (required by libbaz)
new libbaz/1.1.0
EOO
@@ -810,8 +1451,9 @@ test.options += --no-progress
:
{
$clone_cfg;
- $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
- $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+ $pkg_build libfoo/1.1.0 libbar/1.1.0 -d cfg 2>!;
$* libfoo-1.2.0.tar.gz 2>>EOE != 0;
error: unknown package libfoo-1.2.0.tar.gz
@@ -819,11 +1461,52 @@ test.options += --no-progress
$* libfoo/1.0.0 2>>EOE != 0;
error: unable to downgrade package libfoo/1.1.0 to 1.0.0
- info: because package libbar depends on (libfoo == 1.1.0)
- info: explicitly request up/downgrade of package libbar
+ info: because configured package libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: re-run with -v for additional dependency information
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package libbar
+ info: or explicitly specify package libfoo version to manually satisfy these constraints
+ EOE
+
+ $* libfoo/1.1.0 --keep-unused >'update libfoo/1.1.0';
+
+ $pkg_disfigure libbar 2>'disfigured libbar/1.1.0';
+ $pkg_purge libbar 2>'purged libbar/1.1.0';
+
+ $pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
+ $pkg_purge libfoo 2>'purged libfoo/1.1.0'
+ }
+
+ : unable-downgrade-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t4a $rep/t4b $rep/t4c && $rep_fetch -d cfg2;
+ $cfg_link -d cfg2 cfg;
+
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+ $pkg_build libfoo/1.1.0 libbar/1.1.0 -d cfg 2>!;
+
+ $* libfoo-1.2.0.tar.gz 2>>EOE != 0;
+ error: unknown package libfoo-1.2.0.tar.gz
+ EOE
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: unable to downgrade package libfoo/1.1.0 \[cfg.\] to 1.0.0%
+ % info: because configured package libbar/1.1.0 \[cfg.\] depends on \(libfoo == 1.1.0\)%
+ info: re-run with -v for additional dependency information
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package libbar
info: or explicitly specify package libfoo version to manually satisfy these constraints
EOE
+ test.arguments = $regex.apply($test.arguments, cfg2, cfg);
+
$* libfoo/1.1.0 --keep-unused >'update libfoo/1.1.0';
$pkg_disfigure libbar 2>'disfigured libbar/1.1.0';
@@ -832,6 +1515,49 @@ test.options += --no-progress
$pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
$pkg_purge libfoo 2>'purged libfoo/1.1.0'
}
+
+ : able-downgrade
+ :
+ : Similar to the above unable-downgrade, but this time libfoo and libbar
+ : are configured manually and so are not held. Thus, libfoo downgrades
+ : successfully since libbar is just dropped having no dependents.
+ :
+ {
+ $clone_cfg;
+
+ $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
+ $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ $* libfoo/1.0.0 >>EOO;
+ downgrade libfoo/1.0.0
+ drop libbar/1.1.0 (unused)
+ EOO
+
+ $pkg_drop libbar libfoo
+ }
+
+ : able-downgrade-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t4c && $rep_fetch -d cfg2;
+ $cfg_link -d cfg2 cfg;
+
+ $pkg_fetch libfoo/1.1.0 && $pkg_unpack libfoo && $pkg_configure libfoo;
+ $pkg_fetch libbar/1.1.0 && $pkg_unpack libbar && $pkg_configure libbar;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* libfoo/1.0.0 +{ --config-id 1 } >>~%EOO%;
+ %downgrade libfoo/1.0.0 \[cfg.\]%
+ %drop libbar/1.1.0 \[cfg.\] \(unused\)%
+ EOO
+
+ $pkg_drop libbar libfoo
+ }
}
: dependent-reconfiguration
@@ -862,7 +1588,7 @@ test.options += --no-progress
$* libbar >>EOO
upgrade libfoo/1.1.0 (required by libbar)
upgrade libbar/1.1.0
- reconfigure libbaz (dependent of libbar libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
EOO
: foo
@@ -871,7 +1597,7 @@ test.options += --no-progress
$* libfoo >>EOO
upgrade libfoo/1.1.0
reconfigure libbar (dependent of libfoo)
- reconfigure libbaz (dependent of libbar libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
EOO
: foo-bar
@@ -880,7 +1606,7 @@ test.options += --no-progress
$* libfoo libbar/1.0.0 >>EOO
upgrade libfoo/1.1.0
reconfigure/update libbar/1.0.0
- reconfigure libbaz (dependent of libbar libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
EOO
: bar-foo
@@ -889,7 +1615,7 @@ test.options += --no-progress
$* libbar/1.0.0 libfoo >>EOO
upgrade libfoo/1.1.0
reconfigure/update libbar/1.0.0
- reconfigure libbaz (dependent of libbar libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
EOO
: baz-foo
@@ -1113,7 +1839,6 @@ test.options += --no-progress
$pkg_status libfoo >'!libfoo configured 1.0.0 available [1.1.0]';
$* libbaz 2>>~%EOE%;
- warning: package libbar dependency on (libfoo == 1.1.0) is forcing upgrade of libfoo/1.0.0 to 1.1.0
disfigured libfoo/1.0.0
fetched libfoo/1.1.0
unpacked libfoo/1.1.0
@@ -1166,6 +1891,38 @@ test.options += --no-progress
$pkg_disfigure libfoo 2>'disfigured libfoo/1.0.0';
$pkg_purge libfoo 2>'purged libfoo/1.0.0'
}
+
+ : forcing-upgrade-held-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+ $rep_fetch -d cfg2 $rep/t4c;
+
+ $* libfoo/1.0.0 +{ --config-id 1 } 2>>~%EOE%;
+ %fetched libfoo/1.0.0 \[cfg2.\]%
+ %unpacked libfoo/1.0.0 \[cfg2.\]%
+ %configured libfoo/1.0.0 \[cfg2.\]%
+ %info: .+ is up to date%
+ %updated libfoo/1.0.0 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg2 libfoo >'!libfoo configured !1.0.0 available [1.1.0]';
+
+ $* libbaz ?libbar +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: package libbar \[cfg2.\] dependency on \(libfoo == 1.1.0\) is forcing upgrade of libfoo/1.0.0 \[cfg2.\] to 1.1.0%
+ % info: package version libfoo/1.0.0 \[cfg2.\] is held%
+ info: explicitly request version upgrade to continue
+ %info: while satisfying libbar/1.1.0 \[cfg2.\]%
+ info: while satisfying libbaz/1.1.0
+ EOE
+
+ $pkg_disfigure -d cfg2 libfoo 2>'disfigured libfoo/1.0.0';
+ $pkg_purge -d cfg2 libfoo 2>'purged libfoo/1.0.0'
+ }
}
: drop-dependencies
@@ -1200,7 +1957,7 @@ test.options += --no-progress
updated libbar/1.2.0
EOE
- $pkg_status libfoo >'libfoo available 1.0.0';
+ $pkg_status libfoo >'libfoo available 1.0.0 0.1.0';
$pkg_status libbar >'!libbar configured 1.2.0';
$* libbar/1.0.0 libfoo 2>>~%EOE%;
@@ -1317,6 +2074,17 @@ test.options += --no-progress
$pkg_status libbar >'!libbar configured 1.2.0';
+ # While at it, test using --mask-repository* instead of rep-remove.
+ #
+ $* --upgrade --mask-repository $rep/t2 --mask-repository $rep/t5 2>>/EOE != 0;
+ error: libbar is not available
+ EOE
+
+ $* --upgrade --mask-repository-uuid "$cfg_uuid=($rep/t2)" \
+ --mask-repository-uuid "$cfg_uuid=($rep/t5)" 2>>/EOE != 0;
+ error: libbar is not available
+ EOE
+
$rep_remove $rep/t2 $rep/t5;
$* --upgrade 2>>/EOE != 0;
@@ -1327,6 +2095,687 @@ test.options += --no-progress
$pkg_drop libbar
}
+
+ : foo-baz-box-bar
+ :
+ : Test build scenarios described in
+ : https://github.com/conan-io/conan/issues/9547.
+ :
+ if (!$remote)
+ {
+ +mkdir 1/
+
+ # Repository state 0: initial (see the above t9 directory tree for details).
+
+ # Repository state 1: libbar/2.0.0 is released.
+ #
+ r = 1/t9-1
+ +cp -r $rep/t9 $r && \
+ $tar xzf $r/libbar-1.0.0.tar.gz -C $r && \
+ mv $r/libbar-1.0.0 $r/libbar-2.0.0 && \
+ sed -i -e 's/(version:).+/\1 2.0.0/' $r/libbar-2.0.0/manifest && \
+ $tar cfz $r/libbar-2.0.0.tar.gz -C $r libbar-2.0.0 &$r/libbar-2.0.0.tar.gz && \
+ rm -r $r/libbar-2.0.0 && \
+ $rep_create $r &$r/packages.manifest
+
+ # Repository state 2: libbaz/2.0.0 is released, which depends on
+ # libbar ^2.0.0.
+ #
+ r = 1/t9-2
+ +cp -r 1/t9-1 $r && \
+ $tar xzf $r/libbaz-1.0.0.tar.gz -C $r && \
+ mv $r/libbaz-1.0.0 $r/libbaz-2.0.0 && \
+ sed -i -e 's/(version:).+/\1 2.0.0/' -e 's/(depends: libbar).+/\1 ^2.0.0/' $r/libbaz-2.0.0/manifest && \
+ $tar cfz $r/libbaz-2.0.0.tar.gz -C $r libbaz-2.0.0 &$r/libbaz-2.0.0.tar.gz && \
+ rm -r $r/libbaz-2.0.0 && \
+ $rep_create $r &$r/packages.manifest
+
+ # Repository state 3: libbox/1.1.0 is released, which still depends on
+ # libbar ^1.0.0.
+ #
+ r = 1/t9-3
+ +cp -r 1/t9-2 $r && \
+ $tar xzf $r/libbox-1.0.0.tar.gz -C $r && \
+ mv $r/libbox-1.0.0 $r/libbox-1.1.0 && \
+ sed -i -e 's/(version:).+/\1 1.1.0/' $r/libbox-1.1.0/manifest && \
+ $tar cfz $r/libbox-1.1.0.tar.gz -C $r libbox-1.1.0 &$r/libbox-1.1.0.tar.gz && \
+ rm -r $r/libbox-1.1.0 && \
+ $rep_create $r &$r/packages.manifest
+
+ : repo-state-1
+ :
+ {
+ : build
+ :
+ {
+ $clone_root_cfg;
+ $rep_add ../../1/t9-1 && $rep_fetch;
+
+ $* foo 2>!;
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+
+ : upgrade
+ :
+ : Test upgrade of packages (initially built from the repository state 0)
+ : after the state 1 is fetched.
+ :
+ {
+ $clone_root_cfg;
+
+ mkdir 1/;
+ ln -s $rep/t9 1/repo;
+ $rep_add 1/repo && $rep_fetch;
+
+ $* foo 2>!;
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ rm 1/repo;
+ ln -s ../../../1/t9-1 1/repo;
+
+ $rep_fetch;
+
+ $* foo 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $* ?libbar 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $* ?libbaz 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+ }
+
+ : repo-state-2
+ :
+ {
+ : build
+ :
+ {
+ $clone_root_cfg;
+ $rep_add ../../1/t9-2 && $rep_fetch;
+
+ # Picks up the latest libbaz (2.0.0) as a prerequisite for foo, which
+ # leads to the conflict between libbaz/2.0.0 and libbox/1.0.0 about
+ # prerequisite libbar because of the incompatible version constraints.
+ # This gets automatically resolved by the unsatisfied constraints
+ # resolution machinery.
+ #
+ $* foo --plan '' --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/2.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build: postpone failure for dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: collect_build: pick libbar/2.0.0 over libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/1.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version libbaz/2.0.0 with 1.0.0 by adding package spec '?libbaz == 1.0.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.0.0 (required by libbaz, libbox)
+ new libbaz/1.0.0 (required by foo)
+ new libbox/1.0.0 (required by foo)
+ new foo/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+
+ : upgrade
+ :
+ : Test upgrade of packages (initially built from the repository state 0)
+ : after the state 2 is fetched.
+ :
+ {
+ $clone_root_cfg;
+
+ mkdir 1/;
+ ln -s $rep/t9 1/repo;
+ $rep_add 1/repo && $rep_fetch;
+
+ $* foo 2>!;
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ rm 1/repo;
+ ln -s ../../../1/t9-2 1/repo;
+
+ $rep_fetch;
+
+ $* foo 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $* ?libbar 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ # Initially unable to satisfy the dependent libbox with an upgraded
+ # (due to libbaz 2.0.0) prerequisite libbar/2.0.0. But this gets
+ # automatically resolved by the unsatisfied constraints resolution
+ # machinery.
+ #
+ $* ?libbaz --plan '' --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbaz/1.0.0: update to libbaz/2.0.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ info: package libbaz dependency on (libbar ^2.0.0) is forcing upgrade of libbar/1.0.0 to 2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: skip being built existing dependent libbaz of dependency libbar
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbar/2.0.0 due to constraint (libbar ^1.0.0)
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+ }
+
+ : repo-state-3
+ :
+ {
+ : build
+ :
+ {
+ $clone_root_cfg;
+ $rep_add ../../1/t9-3 && $rep_fetch;
+
+ # Similar to the repository state 2, picks up the latest libbaz
+ # (2.0.0) as a prerequisite for foo, which leads to the conflict,
+ # which is resolved automatically.
+ #
+ $* foo --plan '' --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/2.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build: add libbox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.1.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.1.0
+ trace: collect_build: postpone failure for dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: collect_build: pick libbar/2.0.0 over libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbox/1.1.0
+ trace: collect_build_prerequisites: end libbox/1.1.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/1.1.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbox/1.1.0 with 1.0.0 by adding package spec '?libbox == 1.0.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/2.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build: postpone failure for dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: collect_build: pick libbar/2.0.0 over libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/1.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version libbox/1.0.0 is denied since it is specified on command line as '?libbox == 1.0.0'
+ trace: try_replace_dependent: try to replace conflicting dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version libbaz/2.0.0 with 1.0.0 by adding package spec '?libbaz == 1.0.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: start command line adjustments refinement cycle by rolling back first adjustment ('?libbox == 1.0.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build: add libbox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/1.1.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbox/1.1.0
+ trace: collect_build_prerequisites: end libbox/1.1.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: command line adjustment '?libbox == 1.0.0' is redundant, dropping it
+ new libbar/1.0.0 (required by libbaz, libbox)
+ new libbaz/1.0.0 (required by foo)
+ new libbox/1.1.0 (required by foo)
+ new foo/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.1.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+
+ : upgrade
+ :
+ : Test upgrade of packages (initially built from the repository state 0)
+ : after the state 3 is fetched.
+ :
+ {
+ $clone_root_cfg;
+
+ mkdir 1/;
+ ln -s $rep/t9 1/repo;
+ $rep_add 1/repo && $rep_fetch;
+
+ $* foo 2>!;
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ rm 1/repo;
+ ln -s ../../../1/t9-3 1/repo;
+
+ $rep_fetch;
+
+ $* foo 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0 available 1.1.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $* ?libbar 2>!; # Noop.
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0 available 1.1.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ test.arguments += --plan '' --verbose 5;
+
+ # Initially, unable to satisfy the dependent libbox with an upgraded
+ # (due to libbaz 2.0.0) prerequisite libbar/2.0.0. But this gets
+ # automatically resolved by the unsatisfied constraints resolution
+ # machinery.
+ #
+ $* foo ?libbaz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbaz/1.0.0: update to libbaz/2.0.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ info: package libbaz dependency on (libbar ^2.0.0) is forcing upgrade of libbar/1.0.0 to 2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: skip being built existing dependent libbaz of dependency libbar
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbar/2.0.0 due to constraint (libbar ^1.0.0)
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ update foo/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.0.0 available 1.1.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ # libbar/2.0.0 is still unsatisfactory, initially, as a prerequisite
+ # for libbox, even after libbox upgrade is requested on the command
+ # line. This gets automatically resolved by the unsatisfied constraints
+ # resolution machinery.
+ #
+ $* foo ?libbaz ?libbox 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbaz/1.0.0: update to libbaz/2.0.0
+ trace: evaluate_dependency: libbox/1.0.0: update to libbox/1.1.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ info: package libbaz dependency on (libbar ^2.0.0) is forcing upgrade of libbar/1.0.0 to 2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: skip being built existing dependent libbaz of dependency libbar
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbar/2.0.0 due to constraint (libbar ^1.0.0)
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbox/1.1.0
+ trace: collect_build: postpone failure for dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: collect_build: pick libbar/2.0.0 over libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbox/1.1.0
+ trace: collect_build_prerequisites: end libbox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/1.1.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libbox/1.1.0 with 1.0.0 by adding constraint '?libbox' -> '?libbox == 1.0.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbaz/1.0.0: update to libbaz/2.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/2.0.0
+ info: package libbaz dependency on (libbar ^2.0.0) is forcing upgrade of libbar/1.0.0 to 2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.0.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: skip being built existing dependent libbaz of dependency libbar
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbar/2.0.0 due to constraint (libbar ^1.0.0)
+ trace: collect_build_prerequisites: begin libbar/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbar/2.0.0 (^1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfiable dependent libbaz/2.0.0 of dependency libbar/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfiable dependent version libbaz/2.0.0 with 1.0.0 by adding constraint '?libbaz' -> '?libbaz == 1.0.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: start command line adjustments refinement cycle by rolling back first adjustment ('?libbox' -> '?libbox == 1.0.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: skip configured foo/1.0.0
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbaz/1.0.0: unchanged
+ trace: evaluate_dependency: libbox/1.0.0: update to libbox/1.1.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbox/1.1.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbox/1.1.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: command line adjustment '?libbox' -> '?libbox == 1.0.0' is redundant, dropping it
+ upgrade libbox/1.1.0
+ reconfigure/update foo/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0 available 2.0.0
+ libbar configured 1.0.0 available 2.0.0
+ libbox configured 1.1.0
+ libbar configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop foo
+ }
+ }
+ }
+
+ : config-vars
+ :
+ {
+ +$cfg_create -d cfg cc config.cc.coptions=-Wall 2>- &cfg/***
+ +$rep_add $rep/t5 && $rep_fetch
+
+ : override-package-specific
+ :
+ {
+ $clone_cfg;
+
+ $* --configure-only { config.cc.coptions+=-g }+ libbar \
+ { config.cc.coptions+=-O }+ libbox 2>>EOE;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ fetched libbox/1.2.0
+ unpacked libbox/1.2.0
+ configured libbar/1.2.0
+ configured libbox/1.2.0
+ EOE
+
+ cat cfg/build/config.build >>~%EOO%;
+ %.*
+ config.cc.coptions = -Wall
+ %.*
+ EOO
+
+ cat cfg/libbar-1.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.cc.coptions = -Wall -g
+ %.*
+ EOO
+
+ cat cfg/libbox-1.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.cc.coptions = -Wall -O
+ %.*
+ EOO
+
+ $pkg_drop libbar libbox
+ }
+
+ : override-globally
+ :
+ {
+ $clone_cfg;
+
+ $* --configure-only config.cc.coptions+=-g \
+ config.cc.coptions+=-O -- libbar 2>>EOE;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ configured libbar/1.2.0
+ EOE
+
+ cat cfg/build/config.build >>~%EOO%;
+ %.*
+ config.cc.coptions = -Wall
+ %.*
+ EOO
+
+ cat cfg/libbar-1.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.cc.coptions = -Wall -g -O
+ %.*
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
}
: dependency
@@ -1427,7 +2876,10 @@ test.options += --no-progress
$rep_fetch $rep/t0c;
$* '?libbux' 2>'error: unknown package libbux' != 0;
- $* '?sys:libbux' 2>'error: unknown package sys:libbux' != 0;
+ $* '?sys:libbux' 2>>EOE != 0;
+ error: unknown package sys:libbux
+ info: consider specifying sys:libbux/*
+ EOE
$* '?libbar/1.3' 2>'error: unknown package libbar/1.3' != 0;
$* '?libbar[5 7]' 2>"error: unknown package 'libbar [5 7]'" != 0
}
@@ -1447,8 +2899,7 @@ test.options += --no-progress
unpacked libbox/0.0.1
configured libbaz/0.0.4
configured libbox/0.0.1
- %info: .+ is up to date%{2}
- updated libbaz/0.0.4
+ %info: .+libbox.+ is up to date%
updated libbox/0.0.1
EOE
@@ -1470,8 +2921,7 @@ test.options += --no-progress
unpacked libbaz/0.1.0
configured libbaz/0.1.0
configured libbox/0.0.1
- %info: .+ is up to date%{2}
- updated libbaz/0.1.0
+ %info: .+libbox.+ is up to date%
updated libbox/0.0.1
EOE
@@ -1492,20 +2942,26 @@ test.options += --no-progress
$rep_fetch;
$* libfoo 2>>~%EOE% != 0;
- error: unknown dependency libhello >= 1.0 of package libfoo
+ error: no package available for dependency libhello of package libfoo
%.+
EOE
- $* libfoo '?sys:libhello' 2>'error: unknown package sys:libhello' != 0;
+ $* libfoo '?sys:libhello' 2>>EOE != 0;
+ error: unknown package sys:libhello
+ info: consider specifying sys:libhello/*
+ EOE
- $* "?sys:libhello/2.0@$rep/t0a" --trust-yes 2>>~%EOE% != 0;
+ $* "sys:libhello/2.0@$rep/t0a" --trust-yes 2>>~%EOE% != 0;
%.+
%error: package sys:libhello/2\.0 is not found in .+t0a%
EOE
- $* libfoo '?sys:libhello/0.1' 2>>~%EOE% != 0;
+ $* libfoo '?sys:libhello/0.1' 2>>EOE != 0;
error: unable to satisfy constraints on package libhello
- %.+
+ info: libfoo depends on (libhello >= 1.0)
+ info: command line depends on (libhello == 0.1)
+ info: specify libhello version to satisfy libfoo constraint
+ info: while satisfying libfoo/1.1.0
EOE
$* libfoo '?sys:libhello/*' 2>>~%EOE%;
@@ -1570,7 +3026,7 @@ test.options += --no-progress
configured libbar/1.2.0
EOE
- $pkg_status libfoo >'libfoo available 1.0.0';
+ $pkg_status libfoo >'libfoo available 1.0.0 0.1.0';
$pkg_disfigure libbar 2>'disfigured libbar/1.2.0';
$pkg_purge libbar 2>'purged libbar/1.2.0'
@@ -1609,6 +3065,43 @@ test.options += --no-progress
$pkg_purge libfoo 2>'purged libfoo/1.0.0'
}
+ : order-drop
+ :
+ {
+ test.arguments += --yes
+
+ $clone_root_cfg;
+ $rep_fetch $rep/t2 $rep/t3;
+
+ $* libbaz libbar 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* ?libbaz ?libfoo/0.1.0 2>>EOE;
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/0.1.0
+ unpacked libfoo/0.1.0
+ purged libbaz/1.0.0
+ configured libfoo/0.1.0
+ configured libbar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
: drop-recursive
:
{
@@ -1626,10 +3119,10 @@ test.options += --no-progress
disfigured libbar/0.0.1
disfigured libbaz/0.0.1
disfigured libfox/0.0.1
- fetched libfoo/1.0.0
- unpacked libfoo/1.0.0
purged libfox/0.0.1
purged libbaz/0.0.1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
fetched libbar/1.0.0
unpacked libbar/1.0.0
configured libfoo/1.0.0
@@ -1689,20 +3182,33 @@ test.options += --no-progress
$clone_root_cfg;
$rep_fetch $rep/t0a $rep/t0b;
+ # Note that before we have implemented the unsatisfied constraints
+ # resolution this command has failed as follows:
+ #
+ # error: unable to satisfy constraints on package libbaz
+ # info: libbar depends on (libbaz == 0.0.1)
+ # command line requires (libbar == 0.0.1)
+ # info: command line depends on (libbaz == 0.0.2)
+ # info: specify libbaz version to satisfy libbar constraint
+ # info: while satisfying libbar/0.0.1
+ #
$* libbar/0.0.1 ?libbaz/0.0.2 2>>EOE != 0;
- error: unable to satisfy constraints on package libbaz
- info: libbar depends on (libbaz == 0.0.1)
- info: command line depends on (libbaz == 0.0.2)
- info: specify libbaz version to satisfy libbar constraint
- info: while satisfying libbar/0.0.1
+ error: libbaz/0.0.2 is not available from its dependents' repositories
EOE
+ # Note that before we have implemented the unsatisfied constraints
+ # resolution this command has failed as follows:
+ #
+ # error: unable to satisfy constraints on package libbaz
+ # info: libbar depends on (libbaz == 0.0.1)
+ # command line requires (libbar == 0.0.1)
+ # info: command line depends on (libbaz >= 0.0.2)
+ # info: specify libbaz version to satisfy libbar constraint
+ # info: while satisfying libbar/0.0.1
+ #
$* -- libbar/0.0.1 '?libbaz>=0.0.2' 2>>EOE != 0
- error: unable to satisfy constraints on package libbaz
- info: libbar depends on (libbaz == 0.0.1)
- info: command line depends on (libbaz >= 0.0.2)
- info: specify libbaz version to satisfy libbar constraint
- info: while satisfying libbar/0.0.1
+ error: package libbaz doesn't satisfy its dependents
+ info: libbaz/0.0.3 doesn't satisfy libbar/0.0.1
EOE
}
@@ -1712,40 +3218,61 @@ test.options += --no-progress
: satisfy-dependents
:
: Test resolving a conflict when libfix and libbiz have selected such
- : versions of their dependency libbaz, that do not satisfy each other
- : constraints. We resolve the conflict explicitly specifying
+ : versions of their dependency libbaz, that don't satisfy each other
+ : constraints. We resolve the conflict automatically as if by specifying
: ?libbaz/0.0.3 on the command line, which satisfies both constraints.
:
{
$clone_root_cfg;
$rep_fetch $rep/t0b $rep/t0c;
- $* libfix libbiz 2>>EOE != 0;
- error: unable to satisfy constraints on package libbaz
- info: libfix depends on (libbaz >= 0.0.3)
- info: libbiz depends on (libbaz <= 0.0.3)
- info: available libbaz/0.1.0
- info: available libbaz/0.0.2
- info: explicitly specify libbaz version to manually satisfy both constraints
- info: while satisfying libbiz/0.0.2
- EOE
-
- $* libfix libbiz ?libbaz/0.0.3 2>>EOE;
- fetched libfoo/1.0.0
- unpacked libfoo/1.0.0
- fetched libbaz/0.0.3
- unpacked libbaz/0.0.3
- fetched libfix/0.0.3
- unpacked libfix/0.0.3
- fetched libbiz/0.0.2
- unpacked libbiz/0.0.2
- configured libfoo/1.0.0
- configured libbaz/0.0.3
- configured libfix/0.0.3
- configured libbiz/0.0.2
+ $* libfix libbiz --plan '' --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfix/0.0.3
+ trace: collect_build: add libbiz/0.0.2
+ trace: collect_build_prerequisites: begin libfix/0.0.3
+ trace: collect_build: add libbaz/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.1.0 of dependent libfix/0.0.3
+ trace: collect_build_prerequisites: begin libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbaz/0.1.0
+ trace: collect_build_prerequisites: end libfix/0.0.3
+ trace: collect_build_prerequisites: begin libbiz/0.0.2
+ trace: collect_build: postpone failure for dependent libbiz unsatisfied with dependency libbaz/0.1.0 (<= 0.0.3)
+ trace: collect_build: pick libbaz/0.1.0 over libbaz/0.0.2
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.1.0 of dependent libbiz/0.0.2
+ trace: collect_build_prerequisites: end libbiz/0.0.2
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbaz/0.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbaz/0.1.0 with 0.0.3 by adding package spec '?libbaz == 0.0.3' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfix/0.0.3
+ trace: collect_build: add libbiz/0.0.2
+ trace: collect_build_prerequisites: begin libfix/0.0.3
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.0.3 of dependent libfix/0.0.3
+ trace: collect_build_prerequisites: begin libbaz/0.0.3
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbaz/0.0.3
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbaz/0.0.3
+ trace: collect_build_prerequisites: end libfix/0.0.3
+ trace: collect_build_prerequisites: begin libbiz/0.0.2
+ trace: collect_build: pick libbaz/0.0.3 over libbaz/0.0.2
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.0.3 of dependent libbiz/0.0.2
+ trace: collect_build_prerequisites: end libbiz/0.0.2
+ trace: execute_plan: simulate: yes
+ %.*
+ new libfoo/1.0.0 (required by libbaz)
+ new libbaz/0.0.3 (required by libbiz, libfix)
+ new libfix/0.0.3
+ new libbiz/0.0.2
+ trace: execute_plan: simulate: no
+ %.*
EOE
- $pkg_status libbaz >'libbaz configured !0.0.3 available 0.1.0 0.0.4';
+ $pkg_status libbaz >'libbaz configured 0.0.3 available 0.1.0 0.0.4';
$pkg_drop libbiz libfix
}
@@ -1787,40 +3314,71 @@ test.options += --no-progress
: Test resolving a conflict when libbox and libfox have selected such
: versions of their dependency libfoo, that do not satisfy each other
: constraints. Note that these constraints are incompatible, so we
- : resolve the conflict explicitly specifying ?libfox/0.0.1 on the
- : command line, to replace one of the conflicting dependents.
+ : automatically resolve the conflict by implicitly specifying
+ : ?libfox/0.0.1 on the command line, to replace one of the conflicting
+ : dependents.
:
{
$clone_root_cfg;
$rep_fetch $rep/t0d;
- $* libbiz 2>>EOE != 0;
- error: unable to satisfy constraints on package libfoo
- info: libbox depends on (libfoo == 1.0.0)
- info: libfox depends on (libfoo == 0.0.1)
- info: available libfoo/1.0.0
- info: available libfoo/0.0.1
- info: explicitly specify libfoo version to manually satisfy both constraints
- info: while satisfying libbox/0.0.2
- info: while satisfying libbiz/0.0.1
- EOE
-
- $* libbiz ?libfox/0.0.1 2>>EOE;
- fetched libfox/0.0.1
- unpacked libfox/0.0.1
- fetched libfoo/1.0.0
- unpacked libfoo/1.0.0
- fetched libbox/0.0.2
- unpacked libbox/0.0.2
- fetched libbiz/0.0.1
- unpacked libbiz/0.0.1
- configured libfox/0.0.1
- configured libfoo/1.0.0
- configured libbox/0.0.2
- configured libbiz/0.0.1
+ $* libbiz --plan "" --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libbiz/0.0.1
+ trace: collect_build: add libfox/0.0.2
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/0.0.2 of dependent libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libfox/0.0.2
+ trace: collect_build: add libfoo/0.0.1
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/0.0.1 of dependent libfox/0.0.2
+ trace: collect_build_prerequisites: begin libfoo/0.0.1
+ trace: collect_build: add libfix/0.0.1
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfix/0.0.1 of dependent libfoo/0.0.1
+ trace: collect_build_prerequisites: begin libfix/0.0.1
+ trace: collect_build_prerequisites: end libfix/0.0.1
+ trace: collect_build_prerequisites: end libfoo/0.0.1
+ trace: collect_build_prerequisites: end libfox/0.0.2
+ trace: collect_build: add libbox/0.0.2
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/0.0.2 of dependent libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libbox/0.0.2
+ trace: collect_build: postpone failure for dependent libbox unsatisfied with dependency libfoo/0.0.1 (== 1.0.0)
+ trace: collect_build: pick libfoo/0.0.1 over libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/0.0.1 of dependent libbox/0.0.2
+ trace: collect_build_prerequisites: end libbox/0.0.2
+ trace: collect_build_prerequisites: end libbiz/0.0.1
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libfoo/0.0.1 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libbox/0.0.2 of dependency libfoo/0.0.1 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libfox/0.0.2 of dependency libfoo/0.0.1 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version libfox/0.0.2 with 0.0.1 by adding package spec '?libfox == 0.0.1' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libbiz/0.0.1
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/0.0.1 of dependent libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libfox/0.0.1
+ trace: collect_build_prerequisites: end libfox/0.0.1
+ trace: collect_build: add libbox/0.0.2
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/0.0.2 of dependent libbiz/0.0.1
+ trace: collect_build_prerequisites: begin libbox/0.0.2
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbox/0.0.2
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbox/0.0.2
+ trace: collect_build_prerequisites: end libbiz/0.0.1
+ trace: execute_plan: simulate: yes
+ %.*
+ new libfox/0.0.1 (required by libbiz)
+ new libfoo/1.0.0 (required by libbox)
+ new libbox/0.0.2 (required by libbiz)
+ new libbiz/0.0.1
+ trace: execute_plan: simulate: no
+ %.*
EOE
- $pkg_status libfox >'libfox configured !0.0.1 available 0.0.2';
+ $pkg_status libfox >'libfox configured 0.0.1 available 0.0.2';
$pkg_drop libbiz
}
@@ -1911,12 +3469,273 @@ test.options += --no-progress
$rep_fetch $rep/t0a $rep/t0c;
$* libbar/1.0.0 ?libfoo/0.0.1 2>>EOE != 0
- error: unknown dependency libfoo == 0.0.1 of package libbar
+ error: unable to satisfy dependency constraint (libfoo == 0.0.1) of package libbar
+ info: available libfoo versions: 1.0.0
info: while satisfying libbar/1.0.0
EOE
}
}
+ : reconfigure
+ :
+ {
+ test.arguments += --yes --configure-only
+
+ +$clone_root_cfg
+ +$rep_fetch $rep/t8a
+
+ : deps-with-buildfile-clause
+ :
+ {
+ $clone_cfg;
+
+ $* dox 2>!;
+
+ $pkg_status -r >>EOO;
+ !dox configured 1.0.0
+ dax configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $* ?dax; # Noop.
+
+ $* { config.dax.extras=true }+ ?dax 2>>EOE;
+ disfigured dox/1.0.0
+ disfigured dax/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libbar/1.0.0
+ configured dax/1.0.0
+ configured dox/1.0.0
+ EOE
+
+ cat cfg/dax-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.dax.extras = true
+ %.*
+ EOO
+
+ $* ?dax; # Noop.
+
+ cat cfg/dax-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.dax.extras = true
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !dox configured 1.0.0
+ dax configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $* { config.dax.extras=true }+ ?dax 2>>EOE;
+ disfigured dox/1.0.0
+ disfigured dax/1.0.0
+ configured dax/1.0.0
+ configured dox/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dox configured 1.0.0
+ dax configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $* { config.dax.extras=false }+ ?dax 2>>EOE;
+ disfigured dox/1.0.0
+ disfigured dax/1.0.0
+ disfigured libbar/1.0.0
+ purged libbar/1.0.0
+ configured dax/1.0.0
+ configured dox/1.0.0
+ EOE
+
+ cat cfg/dax-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.dax.extras = false
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !dox configured 1.0.0
+ dax configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ # While at it, test that an attempt to reconfigure an orphan dependency
+ # which has its own dependencies with buildfile clauses fails.
+ #
+ $rep_remove $rep/t8a;
+
+ $* { config.dax.extras=true }+ ?dax 2>>/EOE != 0;
+ error: unknown package dax
+ info: configuration cfg/ has no repositories
+ info: use 'bpkg rep-add' to add a repository
+ EOE
+
+ cp -rp cfg/dax-1.0.0/ dax;
+
+ $rep_add --type dir "$~/dax";
+ $rep_fetch;
+
+ $* { config.dax.extras=true }+ ?dax 2>>EOE != 0;
+ error: package dax/1.0.0 is orphaned
+ info: explicitly upgrade it to a new version
+ info: while satisfying dax/1.0.0
+ EOE
+
+ $pkg_drop dox
+ }
+
+ : deps-without-buildfile-clause
+ :
+ {
+ $clone_cfg;
+
+ $* foz 2>!;
+
+ $pkg_status -r >>EOO;
+ !foz configured 1.0.0
+ fuz configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ $* ?fuz; # Noop.
+
+ cat cfg/fuz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fuz.extras = false
+ %.*
+ EOO
+
+ $* { config.fuz.extras=true }+ ?fuz 2>>EOE;
+ disfigured foz/1.0.0
+ disfigured fuz/1.0.0
+ configured fuz/1.0.0
+ configured foz/1.0.0
+ EOE
+
+ cat cfg/fuz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fuz.extras = true
+ %.*
+ EOO
+
+ $* ?fuz; # Noop.
+
+ cat cfg/fuz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fuz.extras = true
+ %.*
+ EOO
+
+ $* { config.fuz.extras=false }+ ?fuz 2>>EOE;
+ disfigured foz/1.0.0
+ disfigured fuz/1.0.0
+ configured fuz/1.0.0
+ configured foz/1.0.0
+ EOE
+
+ cat cfg/fuz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fuz.extras = false
+ %.*
+ EOO
+
+ # While at it, test that we can also reconfigure an orphan with its own
+ # dependencies but without buildfile clauses.
+ #
+ $rep_remove $rep/t8a;
+
+ cp -rp cfg/fuz-1.0.0/ fuz;
+ sed -i -e 's/(version:) 1.0.0/\1 2.0.0/' fuz/manifest;
+
+ $rep_add --type dir "$~/fuz";
+ $rep_fetch;
+
+ $* { config.fuz.extras=true }+ ?fuz 2>>EOE;
+ disfigured foz/1.0.0
+ disfigured fuz/1.0.0
+ configured fuz/1.0.0
+ configured foz/1.0.0
+ EOE
+
+ cat cfg/fuz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fuz.extras = true
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !foz configured 1.0.0
+ fuz configured 1.0.0 available 2.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ $pkg_drop foz
+ }
+
+ : no-deps
+ :
+ {
+ $clone_cfg;
+
+ $* fuz 2>!;
+
+ $* ?libfoo; # Noop.
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 2
+ %.*
+ EOO
+
+ $* { config.libfoo.protocol=1 }+ ?libfoo 2>>EOE;
+ disfigured fuz/1.0.0
+ disfigured libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured fuz/1.0.0
+ EOE
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 1
+ %.*
+ EOO
+
+ $* ?libfoo; # Noop.
+
+ # While at it, test that we can also reconfigure an orphan without
+ # dependencies.
+ #
+ $rep_remove $rep/t8a;
+
+ cp -rp cfg/libfoo-2.0.0/ libfoo;
+ sed -i -e 's/(version:) 2.0.0/\1 3.0.0/' libfoo/manifest;
+
+ $rep_add --type dir "$~/libfoo";
+ $rep_fetch;
+
+ $* { config.libfoo.protocol=3 }+ ?libfoo 2>>EOE;
+ disfigured fuz/1.0.0
+ disfigured libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured fuz/1.0.0
+ EOE
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 3
+ %.*
+ EOO
+
+ $pkg_drop fuz
+ }
+ }
+
: refine
:
{
@@ -2170,6 +3989,76 @@ test.options += --no-progress
$pkg_drop libbar
}
+
+ : sys-to-src-unhold
+ :
+ {
+ $clone_cfg;
+
+ $* 'sys:libbaz/1.2.0' 2>>EOE;
+ configured sys:libbaz/1.2.0
+ EOE
+
+ $* ?libbaz libbar/0.0.3 2>>EOE;
+ purged libbaz/1.2.0
+ fetched libbaz/0.1.0
+ unpacked libbaz/0.1.0
+ fetched libbar/0.0.3
+ unpacked libbar/0.0.3
+ configured libbaz/0.1.0
+ configured libbar/0.0.3
+ EOE
+
+ $pkg_drop libbar libbaz
+ }
+
+ : sys-to-src-unhold-same-ver
+ :
+ : Same as above but the version does not change.
+ :
+ {
+ $clone_cfg;
+
+ $* 'sys:libbaz/0.1.0' 2>>EOE;
+ configured sys:libbaz/0.1.0
+ EOE
+
+ $* ?libbaz libbar/0.0.3 2>>EOE;
+ purged libbaz/0.1.0
+ fetched libbaz/0.1.0
+ unpacked libbaz/0.1.0
+ fetched libbar/0.0.3
+ unpacked libbar/0.0.3
+ configured libbaz/0.1.0
+ configured libbar/0.0.3
+ EOE
+
+ $pkg_drop libbar libbaz
+ }
+
+ : sys-to-src
+ :
+ : As above but keep held.
+ :
+ {
+ $clone_cfg;
+
+ $* 'sys:libbaz/1.2.0' 2>>EOE;
+ configured sys:libbaz/1.2.0
+ EOE
+
+ $* libbaz libbar/0.0.3 2>>EOE;
+ purged libbaz/1.2.0
+ fetched libbaz/0.1.0
+ unpacked libbaz/0.1.0
+ fetched libbar/0.0.3
+ unpacked libbar/0.0.3
+ configured libbaz/0.1.0
+ configured libbar/0.0.3
+ EOE
+
+ $pkg_drop libbar libbaz
+ }
}
: source
@@ -2262,6 +4151,71 @@ test.options += --no-progress
$pkg_drop libbar libbox
}
+ : satisfy-masked
+ :
+ : As above but using --mask-repository* instead of rep-remove.
+ :
+ {
+ $clone_cfg;
+ $rep_fetch $rep/t0b;
+
+ $* libbar/0.0.1 2>!;
+
+ $pkg_status libbaz >'libbaz configured 0.0.1 available 0.1.0 0.0.4 0.0.3 0.0.2';
+
+ $* libbar/0.0.2 ?libbaz 2>>EOE;
+ disfigured libbar/0.0.1
+ disfigured libbaz/0.0.1
+ disfigured libfox/0.0.1
+ purged libfox/0.0.1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbaz/0.0.2
+ unpacked libbaz/0.0.2
+ fetched libbar/0.0.2
+ unpacked libbar/0.0.2
+ configured libfoo/1.0.0
+ configured libbaz/0.0.2
+ configured libbar/0.0.2
+ EOE
+
+ $pkg_status libbaz >'libbaz configured 0.0.2 available 0.1.0 0.0.4 0.0.3';
+
+ # Test that the selected package, that is "better" than the available
+ # one, is left.
+ #
+ $* --mask-repository $rep/t0b libbox ?libbaz 2>>EOE;
+ fetched libbox/0.0.1
+ unpacked libbox/0.0.1
+ configured libbox/0.0.1
+ EOE
+
+ $pkg_status libbaz >'libbaz configured 0.0.2 available 0.1.0 0.0.4 0.0.3';
+
+ # Test that the selected package is left as there is no satisfactory
+ # available package.
+ #
+ $* --mask-repository $rep/t0b --mask-repository-uuid "$cfg_uuid=($rep/t0a)" ?libbaz;
+
+ # Test that the above behavior is not triggered for the system package.
+ #
+ $* --mask-repository $rep/t0b --mask-repository $rep/t0a '?sys:libbaz' 2>>EOE;
+ disfigured libbar/0.0.2
+ disfigured libbox/0.0.1
+ disfigured libbaz/0.0.2
+ disfigured libfoo/1.0.0
+ purged libfoo/1.0.0
+ purged libbaz/0.0.2
+ configured sys:libbaz/*
+ configured libbox/0.0.1
+ configured libbar/0.0.2
+ EOE
+
+ $pkg_status libbaz >'libbaz configured,system !* available 0.1.0 0.0.4 0.0.3 0.0.2 0.0.1';
+
+ $pkg_drop libbar libbox
+ }
+
: unsatisfied
:
{
@@ -2280,6 +4234,29 @@ test.options += --no-progress
$pkg_drop libbar
}
+
+ : unsatisfied-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* libbar/0.0.1 2>!;
+
+ $pkg_status libbaz >'libbaz configured 0.0.1 available 0.1.0 0.0.4 0.0.3';
+
+ $* ?libbaz/0.0.3 +{ --config-id 1 } 2>>~%EOE% != 0;
+ %error: package libbaz \[cfg2.\] doesn't satisfy its dependents%
+ info: libbaz/0.0.3 doesn't satisfy libbar/0.0.1
+ EOE
+
+ $pkg_status libbaz >'libbaz configured 0.0.1 available 0.1.0 0.0.4 0.0.3';
+
+ $pkg_drop libbar
+ }
}
: scratch
@@ -2434,7 +4411,7 @@ test.options += --no-progress
: as a dependency, so it is built incrementally.
:
{
- $cfg_create cxx $config_cxx -d cfg 2>- &cfg/***;
+ $cfg_create cxx $config_cxx -d cfg &cfg/***;
# Add libhello as the dir repository.
#
@@ -2452,7 +4429,7 @@ test.options += --no-progress
# Note that libfoo building doesn't trigger libhello building as it is a
# fake dependent, so build both explicitly.
#
- $* libfoo ?libhello 2>!;
+ $* libfoo libhello 2>!;
# Move libhello version ahead.
#
@@ -2466,16 +4443,17 @@ test.options += --no-progress
# date on filesystems with a low file timestamps resolution (for example
# HFS+).
#
- $* ?libhello --yes --keep-out 2>>~%EOE%
+ $* ?libhello --yes --keep-out 2>>~%EOE%;
disfigured libfoo/1.1.0
disfigured libhello/1.0.0
using libhello/1.0.1 (external)
configured libhello/1.0.1
configured libfoo/1.1.0
- %info: .+ is up to date%{1,2}
- updated libhello/1.0.1
+ %info: .+libfoo.+ is up to date%
updated libfoo/1.1.0
EOE
+
+ test -d cfg/libhello/libhello
}
}
@@ -2527,10 +4505,9 @@ test.options += --no-progress
: adjust-merge-build
:
- : Test that the registered in the map but not ordered package build
- : (libfoo) is properly merged into the reconfigure adjustment as a
- : dependent of the reconfigured dependency (see collect_order_dependents()
- : for more details).
+ : Test that the registered in the map package build (libfoo) is properly
+ : merged into the reconfigure adjustment as a dependent of the
+ : reconfigured dependency (see collect_dependents() for more details).
:
{
$clone_root_cfg;
@@ -2564,6 +4541,1564 @@ test.options += --no-progress
$pkg_drop libbaz libbar libfoo
}
+
+ : reconfiguration
+ :
+ {
+ +$clone_root_cfg
+ +$rep_add $rep/t4f && $rep_fetch
+
+ : re-order
+ :
+ : This test reproduced a failure of the collect_order_dependents()
+ : function (now turned into collect_dependents()) to properly order
+ : dependents of a being upgraded package, if the current version of this
+ : package is a dependent of a being reconfigured dependency. The now
+ : fixed bug ended up with the 'unable to satisfy dependency' failure of
+ : the subsequent pkg_configure() function call for the described case.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar libbox/1.0.0 ?libbax/1.0.0 2>!;
+
+ $pkg_status -ar >>EOO;
+ libbax configured !1.0.0 available 2.0.0
+ !libbox configured !1.0.0 available 2.0.0
+ libbax configured !1.0.0 available 2.0.0
+ !libbar configured 2.1.0
+ !libbox configured !1.0.0 available 2.0.0
+ libbax configured !1.0.0 available 2.0.0
+ EOO
+
+ $* libfoo libbax 2>>EOE;
+ warning: package libfoo dependency on (libbar == 1.2.0) is forcing downgrade of libbar/2.1.0 to 1.2.0
+ disfigured libbar/2.1.0
+ disfigured libbox/1.0.0
+ disfigured libbax/1.0.0
+ fetched libbax/2.0.0
+ unpacked libbax/2.0.0
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libbax/2.0.0
+ configured libbox/1.0.0
+ configured libbar/1.2.0
+ configured libfoo/2.0.0
+ EOE
+
+ $pkg_status -ar >>EOO;
+ !libbax configured 2.0.0
+ !libbox configured !1.0.0 available 2.0.0
+ !libbax configured 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libbax libbox libbar libfoo
+ }
+
+ : re-order-unsatisfied
+ :
+ : Similar to the above but the dependent of the mentioned package is
+ : unsatisfied with its dependencies.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar libbox ?libbax/1.0.0 2>!;
+
+ $pkg_status -ar >>EOO;
+ libbax configured !1.0.0 available 2.0.0
+ !libbox configured 2.0.0
+ libbax configured !1.0.0 available 2.0.0
+ !libbar configured 2.1.0
+ !libbox configured 2.0.0
+ libbax configured !1.0.0 available 2.0.0
+ EOO
+
+ $* libfoo libbax/2.0.0 --verbose 5 2>>~%EOE% != 0;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build: add libbax/2.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ warning: package libfoo dependency on (libbar == 1.2.0) is forcing downgrade of libbar/2.1.0 to 1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbax/2.0.0 due to constraint (libbax == 1.0.0)
+ trace: collect_build_prerequisites: begin libbax/2.0.0
+ trace: collect_build_prerequisites: end libbax/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbax/2.0.0 (== 1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ error: unable to upgrade package libbax/1.0.0 to 2.0.0
+ info: because configured package libbox/2.0.0 depends on (libbax == 1.0.0)
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package libbox
+ info: or explicitly specify package libbax version to manually satisfy these constraints
+ %.*
+ EOE
+
+ $pkg_drop libbox libbar
+ }
+ }
+
+ : denied-version-replacements
+ :
+ {
+ +$clone_root_cfg
+ +$rep_add $rep/t4j && $rep_fetch
+
+ : unsatisfactory-version
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfoo libfox --plan "" --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.1.0
+ trace: collect_build: add libfoo/3.0.0
+ trace: collect_build: add libfox/3.0.0
+ trace: collect_build_prerequisites: begin libbaz/2.1.0
+ trace: collect_build_prerequisites: end libbaz/2.1.0
+ trace: collect_build_prerequisites: begin libfoo/3.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libfox/3.0.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.2.0
+ trace: collect_build: libbar/1.2.0 package version needs to be replaced with libbar/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.1.0
+ trace: collect_build: add libfoo/3.0.0
+ trace: collect_build: add libfox/3.0.0
+ trace: collect_build_prerequisites: begin libbaz/2.1.0
+ trace: collect_build_prerequisites: end libbaz/2.1.0
+ trace: collect_build_prerequisites: begin libfoo/3.0.0
+ trace: collect_build: apply version replacement for libbar/1.2.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libfox/3.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/3.0.0
+ trace: collect_build: pick libbaz/1.2.0 over libbaz/2.1.0
+ trace: collect_build: libbaz/2.1.0 package version needs to be replaced with libbaz/1.2.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: apply version replacement for libbaz/2.1.0
+ trace: collect_build: replacement: libbaz/1.2.0
+ trace: collect_build: add libbaz/1.2.0
+ trace: collect_build: add libfoo/3.0.0
+ trace: collect_build: add libfox/3.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.2.0
+ trace: collect_build: apply version replacement for libbar/1.2.0
+ trace: collect_build: replacement to 0.1.0 is denied since libbaz/1.2.0 depends on (libbar == 1.2.0)
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/1.2.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libbaz/1.2.0
+ trace: collect_build_prerequisites: begin libfoo/3.0.0
+ trace: collect_build: apply version replacement for libbar/1.2.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/3.0.0
+ trace: collect_build_prerequisites: end libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libfox/3.0.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/1.2.0 (== 0.1.0)
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/3.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.2.0 of dependent libfox/3.0.0
+ trace: collect_build_prerequisites: end libfox/3.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/1.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfox/3.0.0 of dependency libbar/1.2.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libfox/3.0.0 with 2.1.0 by adding constraint 'libfox' -> 'libfox == 2.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.1.0
+ trace: collect_build: add libfoo/3.0.0
+ trace: collect_build: add libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.1.0
+ trace: collect_build_prerequisites: end libbaz/2.1.0
+ trace: collect_build_prerequisites: begin libfoo/3.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libfox/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build: pick libbaz/1.2.0 over libbaz/2.1.0
+ trace: collect_build: libbaz/2.1.0 package version needs to be replaced with libbaz/1.2.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: apply version replacement for libbaz/2.1.0
+ trace: collect_build: replacement: libbaz/1.2.0
+ trace: collect_build: add libbaz/1.2.0
+ trace: collect_build: add libfoo/3.0.0
+ trace: collect_build: add libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/1.2.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/1.2.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libbaz/1.2.0
+ trace: collect_build_prerequisites: begin libfoo/3.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/3.0.0
+ trace: collect_build_prerequisites: end libfoo/3.0.0
+ trace: collect_build_prerequisites: begin libfox/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: end libfox/2.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libbaz, libfoo, libfox)
+ new libbaz/1.2.0
+ new libfoo/3.0.0
+ new libfox/2.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0
+ !libbaz configured 1.2.0 available 2.1.0
+ libbar configured 1.2.0
+ !libfoo configured 3.0.0
+ libbar configured 1.2.0
+ !libfox configured 2.1.0 available 3.0.0
+ libbar configured 1.2.0
+ !libbaz configured 1.2.0 available 2.1.0
+ libbar configured 1.2.0
+ EOO
+
+ $pkg_drop libbaz libfoo libfox
+ }
+
+ : collect-drop
+ :
+ {
+ $clone_cfg;
+
+ $* libfix ?libfox/0.0.1 libbaz 2>!;
+
+ $pkg_status -ar >>EOO;
+ libfox configured !0.0.1 available 3.0.0 2.1.0
+ !libfix configured 1.0.0
+ libfox configured !0.0.1 available 3.0.0 2.1.0
+ !libbaz configured 2.1.0
+ EOO
+
+ $* ?libbaz ?libfox/2.1.0 --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libfox/0.0.1: update to libfox/2.1.0
+ trace: evaluate_dependency: libbaz/2.1.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval libfix/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libfix/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libfox/2.1.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ warning: package libfox dependency on (libbaz == 1.2.0) is forcing downgrade of libbaz/2.1.0 to 1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/1.2.0
+ trace: collect_build_prerequisites: end libbaz/1.2.0
+ trace: collect_build_prerequisites: end libfox/2.1.0
+ trace: collect_drop: libbaz cannot be dropped since it is required by command line, libfox/2.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: one of dependency evaluation decisions has changed, re-collecting from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build_prerequisites: pre-reeval libfix/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libfix/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libfox/2.1.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ warning: package libfox dependency on (libbaz == 1.2.0) is forcing downgrade of libbaz/2.1.0 to 1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.2.0 of dependent libfox/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/1.2.0
+ trace: collect_build_prerequisites: end libbaz/1.2.0
+ trace: collect_build_prerequisites: end libfox/2.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libfox configured !2.1.0 available 3.0.0
+ libbar configured 1.2.0
+ libbaz configured 1.2.0 available 2.1.0
+ libbar configured 1.2.0
+ !libfix configured 1.0.0
+ libfox configured !2.1.0 available 3.0.0
+ libbar configured 1.2.0
+ libbaz configured 1.2.0 available 2.1.0
+ libbar configured 1.2.0
+ libbaz configured 1.2.0 available 2.1.0
+ libbar configured 1.2.0
+ libbar configured 1.2.0
+ EOO
+
+ $pkg_drop libfix
+ }
+ }
+
+ : constraint-resolution
+ :
+ {
+ +$clone_root_cfg
+ +$rep_add $rep/t4f && $rep_fetch
+
+ test.arguments += --plan "" --verbose 5
+
+ : replace-dependent
+ :
+ {
+ +$clone_cfg
+
+ : basics
+ :
+ : This test demonstrates a case when the dependency resolution
+ : machinery resolves unsatisfied dependency constraints by adding the
+ : package spec to the command line for an unsatisfied dependent
+ : version.
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo libfix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/2.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/1.2.0 (>= 2.0.0)
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/1.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfox/2.0.0 of dependency libbar/1.2.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libfox/2.0.0 with 1.1.0 by adding package spec '?libfox == 1.1.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/1.1.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libfoo, libfox)
+ new libfoo/2.0.0
+ new libfox/1.1.0 (required by libfix)
+ new libfix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfix configured 1.0.0
+ libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfoo libfix
+ }
+
+ : reorder
+ :
+ : Similar to the above, but the unsatisfied dependent which needs to be
+ : replaced differs from the one added to the unsatisfied dependents
+ : list.
+ :
+ {
+ $clone_cfg;
+
+ $* libfix libfoo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/2.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: postpone failure for dependent libfoo unsatisfied with dependency libbar/2.1.0 (== 1.2.0)
+ trace: collect_build: pick libbar/2.1.0 over libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfoo/2.0.0 of dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libfox/2.0.0 of dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version libfox/2.0.0 with 1.1.0 by adding package spec '?libfox == 1.1.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/1.1.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build: libbar/2.1.0 package version needs to be replaced with libbar/1.2.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/1.1.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: apply version replacement for libbar/2.1.0
+ trace: collect_build: replacement: libbar/1.2.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libfoo, libfox)
+ new libfox/1.1.0 (required by libfix)
+ new libfix/1.0.0
+ new libfoo/2.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfix configured 1.0.0
+ libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfoo libfix
+ }
+
+ : to-hold
+ :
+ : Similar to the basics test, but the unsatisfied dependent is being
+ : built to hold rather than as a dependency.
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo libfox 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/1.2.0 (>= 2.0.0)
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfox/2.0.0 of dependency libbar/1.2.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version libfox/2.0.0 with 1.1.0 by adding constraint 'libfox' -> 'libfox == 1.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libfoo, libfox)
+ new libfoo/2.0.0
+ new libfox/1.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfoo libfox
+ }
+
+ : to-hold-reorder
+ :
+ : Similar to the above, but the unsatisfied dependent which needs to be
+ : replaced differs from the one added to the unsatisfied dependents
+ : list.
+ :
+ {
+ $clone_cfg;
+
+ $* libfox libfoo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: postpone failure for dependent libfoo unsatisfied with dependency libbar/2.1.0 (== 1.2.0)
+ trace: collect_build: pick libbar/2.1.0 over libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfoo/2.0.0 of dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libfox/2.0.0 of dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version libfox/2.0.0 with 1.1.0 by adding constraint 'libfox' -> 'libfox == 1.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build: libbar/2.1.0 package version needs to be replaced with libbar/1.2.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libfoo/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: apply version replacement for libbar/2.1.0
+ trace: collect_build: replacement: libbar/1.2.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libfoo/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfoo/2.0.0
+ trace: collect_build_prerequisites: end libfoo/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libfoo, libfox)
+ new libfox/1.1.0
+ new libfoo/2.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libfox configured 1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfoo libfox
+ }
+
+ : unsatisfied-dependent
+ :
+ : This test demonstrates a case when the dependency resolution
+ : machinery resolves unsatisfied dependency constraints by
+ : enforcing noop.
+ :
+ : Note that no version constraints are specified on the
+ : command line, the request is to upgrade all packages to the latest
+ : possible versions, and thus noop in this case is an appropriate
+ : outcome.
+ :
+ {
+ $clone_cfg;
+
+ $* libbox ?libbix/1.0.0 libbux 2>!;
+
+ $pkg_status -ar >>EOO;
+ libbax configured 1.0.0 available 2.0.0
+ !libbox configured 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ !libbux configured 1.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ EOO
+
+ $* --upgrade --recursive 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/2.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbix/1.0.0: update to libbix/2.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build: add libbix/2.0.0
+ trace: collect_build_prerequisites: pre-reeval libbux/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated libbux/1.0.0: end reached
+ trace: collect_build_prerequisites: begin libbix/2.0.0
+ trace: collect_build: add libbax/2.0.0
+ info: package libbix dependency on (libbax == 2.0.0) is forcing upgrade of libbax/1.0.0 to 2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/2.0.0 of dependent libbix/2.0.0
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libbox of dependency libbax/2.0.0 due to constraint (libbax == 1.0.0)
+ trace: collect_build_prerequisites: skip being built existing dependent libbix of dependency libbax
+ trace: collect_build_prerequisites: begin libbax/2.0.0
+ trace: collect_build_prerequisites: end libbax/2.0.0
+ trace: collect_build_prerequisites: end libbix/2.0.0
+ trace: collect_dependents: postpone failure for existing dependent libbox unsatisfied with dependency libbax/2.0.0 (== 1.0.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfiable dependent libbix/2.0.0 of dependency libbax/2.0.0 with some other version
+ trace: try_replace_dependency: replace unsatisfiable dependent version libbix/2.0.0 with 1.0.0 by adding package spec '?libbix == 1.0.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build: add libbux/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/2.0.0
+ trace: collect_build_prerequisites: skip configured libbux/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbax configured 1.0.0 available 2.0.0
+ !libbox configured 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ !libbux configured 1.0.0
+ libbix configured !1.0.0 available 2.0.0
+ libbax configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop libbox libbux
+ }
+
+ : indirect
+ :
+ : Test replacement of indirect dependents of an unsatisfactory
+ : dependency.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4k && $rep_fetch;
+
+ $* libbaz libbar 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/2.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: add libfux/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfux/2.0.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: begin libfux/2.0.0
+ trace: collect_build: add libfaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/2.0.0 of dependent libfux/2.0.0
+ trace: collect_build_prerequisites: begin libfaz/2.0.0
+ trace: collect_build: add libfuz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/2.0.0 of dependent libfaz/2.0.0
+ trace: collect_build_prerequisites: begin libfuz/2.0.0
+ trace: collect_build_prerequisites: end libfuz/2.0.0
+ trace: collect_build_prerequisites: end libfaz/2.0.0
+ trace: collect_build: add libfex/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfex/2.0.0 of dependent libfux/2.0.0
+ trace: collect_build_prerequisites: begin libfex/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/2.0.0 of dependent libfex/2.0.0
+ trace: collect_build_prerequisites: end libfex/2.0.0
+ trace: collect_build_prerequisites: end libfux/2.0.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfix/1.0.0 of dependent libfoo/1.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfax/1.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfax/1.0.0
+ trace: collect_build: postpone failure for dependent libfax unsatisfied with dependency libfuz/2.0.0 (== 1.0.0)
+ trace: collect_build: pick libfuz/2.0.0 over libfuz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/2.0.0 of dependent libfax/1.0.0
+ trace: collect_build_prerequisites: end libfax/1.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfax/1.0.0 of dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libfaz/2.0.0 of dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfix/1.0.0 of dependency libfax/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfoo/1.0.0 of dependency libfix/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libbar/1.0.0 of dependency libfoo/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfux/2.0.0 of dependency libfaz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfex/2.0.0 of dependency libfaz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfox/2.0.0 of dependency libfux/2.0.0 with some other version
+ trace: try_replace_dependency: replace constraining dependent version libfox/2.0.0 with 1.2.0 by adding package spec '?libfox == 1.2.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/1.2.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libfox/1.2.0
+ trace: collect_build: add libfux/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfux/1.0.0 of dependent libfox/1.2.0
+ trace: collect_build_prerequisites: begin libfux/1.0.0
+ trace: collect_build: add libfaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/1.0.0 of dependent libfux/1.0.0
+ trace: collect_build_prerequisites: begin libfaz/1.0.0
+ trace: collect_build: add libfuz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/1.0.0 of dependent libfaz/1.0.0
+ trace: collect_build_prerequisites: begin libfuz/1.0.0
+ trace: collect_build_prerequisites: end libfuz/1.0.0
+ trace: collect_build_prerequisites: end libfaz/1.0.0
+ trace: collect_build: add libfex/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfex/1.0.0 of dependent libfux/1.0.0
+ trace: collect_build_prerequisites: begin libfex/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/1.0.0 of dependent libfex/1.0.0
+ trace: collect_build_prerequisites: end libfex/1.0.0
+ trace: collect_build_prerequisites: end libfux/1.0.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libfex/1.0.0 (>= 2.0.0)
+ trace: collect_build: pick libfex/1.0.0 over libfex/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfex/1.0.0 of dependent libfox/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.2.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfix/1.0.0 of dependent libfoo/1.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfax/1.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/1.0.0 of dependent libfax/1.0.0
+ trace: collect_build_prerequisites: end libfax/1.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libfex/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfox/1.2.0 of dependency libfex/1.0.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version libfox/1.2.0 is denied since it is specified on command line as '?libfox == 1.2.0'
+ trace: try_replace_dependent: try to replace conflicting dependent libfux/1.0.0 of dependency libfex/1.0.0 with some other version
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('?libfox == 1.2.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build: add libfox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/2.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libfox/2.0.0
+ trace: collect_build: add libfux/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfux/2.0.0 of dependent libfox/2.0.0
+ trace: collect_build_prerequisites: begin libfux/2.0.0
+ trace: collect_build: add libfaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/2.0.0 of dependent libfux/2.0.0
+ trace: collect_build_prerequisites: begin libfaz/2.0.0
+ trace: collect_build: add libfuz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/2.0.0 of dependent libfaz/2.0.0
+ trace: collect_build_prerequisites: begin libfuz/2.0.0
+ trace: collect_build_prerequisites: end libfuz/2.0.0
+ trace: collect_build_prerequisites: end libfaz/2.0.0
+ trace: collect_build: add libfex/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfex/2.0.0 of dependent libfux/2.0.0
+ trace: collect_build_prerequisites: begin libfex/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/2.0.0 of dependent libfex/2.0.0
+ trace: collect_build_prerequisites: end libfex/2.0.0
+ trace: collect_build_prerequisites: end libfux/2.0.0
+ trace: collect_build_prerequisites: end libfox/2.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfix/1.0.0 of dependent libfoo/1.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfax/1.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfax/1.0.0
+ trace: collect_build: postpone failure for dependent libfax unsatisfied with dependency libfuz/2.0.0 (== 1.0.0)
+ trace: collect_build: pick libfuz/2.0.0 over libfuz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/2.0.0 of dependent libfax/1.0.0
+ trace: collect_build_prerequisites: end libfax/1.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent libfax/1.0.0 of dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent libfaz/2.0.0 of dependency libfuz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfix/1.0.0 of dependency libfax/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfoo/1.0.0 of dependency libfix/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libbar/1.0.0 of dependency libfoo/1.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfux/2.0.0 of dependency libfaz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfex/2.0.0 of dependency libfaz/2.0.0 with some other version
+ trace: try_replace_dependent: try to replace constraining dependent libfox/2.0.0 of dependency libfux/2.0.0 with some other version
+ trace: try_replace_dependency: replacement libfox/1.2.0 tried earlier for same command line, skipping
+ trace: try_replace_dependency: replace constraining dependent version libfox/2.0.0 with 1.0.0 by adding package spec '?libfox == 1.0.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfox/1.0.0 of dependent libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libfox/1.0.0
+ trace: collect_build: add libfux/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfux/1.0.0 of dependent libfox/1.0.0
+ trace: collect_build_prerequisites: begin libfux/1.0.0
+ trace: collect_build: add libfaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/1.0.0 of dependent libfux/1.0.0
+ trace: collect_build_prerequisites: begin libfaz/1.0.0
+ trace: collect_build: add libfuz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/1.0.0 of dependent libfaz/1.0.0
+ trace: collect_build_prerequisites: begin libfuz/1.0.0
+ trace: collect_build_prerequisites: end libfuz/1.0.0
+ trace: collect_build_prerequisites: end libfaz/1.0.0
+ trace: collect_build: add libfex/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfex/1.0.0 of dependent libfux/1.0.0
+ trace: collect_build_prerequisites: begin libfex/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfaz/1.0.0 of dependent libfex/1.0.0
+ trace: collect_build_prerequisites: end libfex/1.0.0
+ trace: collect_build_prerequisites: end libfux/1.0.0
+ trace: collect_build_prerequisites: end libfox/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build: add libfix/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfix/1.0.0 of dependent libfoo/1.0.0
+ trace: collect_build_prerequisites: begin libfix/1.0.0
+ trace: collect_build: add libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfax/1.0.0 of dependent libfix/1.0.0
+ trace: collect_build_prerequisites: begin libfax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfuz/1.0.0 of dependent libfax/1.0.0
+ trace: collect_build_prerequisites: end libfax/1.0.0
+ trace: collect_build_prerequisites: end libfix/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libfuz/1.0.0 (required by libfax, libfaz)
+ new libfaz/1.0.0 (required by libfex, libfux)
+ new libfex/1.0.0 (required by libfux)
+ new libfux/1.0.0 (required by libfox)
+ new libfox/1.0.0 (required by libbaz)
+ new libbaz/1.0.0
+ new libfax/1.0.0 (required by libfix)
+ new libfix/1.0.0 (required by libfoo)
+ new libfoo/1.0.0 (required by libbar)
+ new libbar/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r libbaz libbar >>EOO;
+ !libbaz configured 1.0.0
+ libfox configured 1.0.0 available 2.0.0 1.2.0
+ libfux configured 1.0.0 available 2.0.0
+ libfaz configured 1.0.0 available 2.0.0
+ libfuz configured 1.0.0 available 2.0.0
+ libfex configured 1.0.0 available 2.0.0
+ libfaz configured 1.0.0 available 2.0.0
+ libfuz configured 1.0.0 available 2.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0 available 2.0.0
+ libfix configured 1.0.0 available 2.0.0
+ libfax configured 1.0.0 available 2.0.0
+ libfuz configured 1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop libbaz libbar
+ }
+ }
+
+ : replace-dependency
+ :
+ {
+ +$clone_cfg
+ +$rep_add $rep/t4i && $rep_fetch
+
+ : basics
+ :
+ : This test demonstrates a case when the dependency resolution
+ : machinery resolves unsatisfied dependency constraints by adding the
+ : package spec to the command line for an unsatisfactory dependency
+ : version.
+ :
+ {
+ $clone_cfg;
+
+ $* libfox/1.1.0 libbaz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: postpone failure for dependent libbaz unsatisfied with dependency libbar/2.1.0 (< 2.1.0)
+ trace: collect_build: pick libbar/2.1.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/2.1.0 with 1.2.0 by adding package spec '?libbar == 1.2.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz
+ }
+
+ : reorder
+ :
+ : Similar to the above, but the order of the dependents on the command
+ : line is swapped.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfox/1.1.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/0.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/0.1.0 with 1.2.0 by adding package spec '?libbar == 1.2.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: rep-postpone dependent libbaz/2.0.0 due to dependency libbar < 2.1.0 and user-specified constraint == 1.2.0
+ trace: collect_build_prerequisites: postpone libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): collect rep-postponed libbaz/2.0.0
+ trace: collect_build_prerequisites: resume libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz
+ }
+
+ : dependency
+ :
+ : Similar to the above, but the dependency is also specified on the
+ : command line.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfox/1.1.0 '?libbar' 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbar/0.1.0: update to libbar/1.2.0
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build: libbar/0.1.0 package version needs to be replaced with libbar/1.2.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: apply version replacement for libbar/0.1.0
+ trace: collect_build: replacement: libbar/1.2.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libbaz, libfox)
+ new libbaz/2.0.0
+ new libfox/1.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz libbar
+ }
+
+ : dependency-constr
+ :
+ : Similar to the above, but also specify the version constraint.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfox/1.1.0 '?libbar < 3.0.0' 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/0.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/0.1.0 with 1.2.0 by overwriting constraint '?libbar < 3.0.0' -> '?libbar == 1.2.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: rep-postpone dependent libbaz/2.0.0 due to dependency libbar < 2.1.0 and user-specified constraint == 1.2.0
+ trace: collect_build_prerequisites: postpone libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): collect rep-postponed libbaz/2.0.0
+ trace: collect_build_prerequisites: resume libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0 (required by libbaz, libfox)
+ new libbaz/2.0.0
+ new libfox/1.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ libbar configured !1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ libbar configured !1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ libbar configured !1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz libbar
+ }
+
+ : to-hold
+ :
+ : Similar to the dependency test, but the dependency is specified as
+ : build-to-hold.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfox/1.1.0 libbar 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build: libbar/2.1.0 package version needs to be replaced with libbar/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: apply version replacement for libbar/2.1.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: resume libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/0.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/0.1.0 with 1.2.0 by adding constraint 'libbar' -> 'libbar == 1.2.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: resume libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: execute_plan: simulate: yes
+ %.*
+ new libbar/1.2.0
+ new libbaz/2.0.0
+ new libfox/1.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ !libbar configured 1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz libbar
+ }
+
+ : to-hold-constr
+ :
+ : Similar to the above, but also specify the version constraint.
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz libfox/1.1.0 'libbar < 3.0.0' 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build: libbar/2.1.0 package version needs to be replaced with libbar/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: apply version replacement for libbar/2.1.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: postpone failure for dependent libfox unsatisfied with dependency libbar/0.1.0 (>= 1.0.0)
+ trace: collect_build: pick libbar/0.1.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: resume libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/0.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/0.1.0 with 1.2.0 by overwriting constraint 'libbar < 3.0.0' -> 'libbar == 1.2.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: resume libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ !libbar configured !1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ !libbar configured !1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ !libbar configured !1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz libbar
+ }
+
+ : configured
+ :
+ : Similar to replace-dependency but the dependency is already configured
+ : as built-to-hold.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar 2>!;
+
+ $* libfox/1.1.0 libbaz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: skip configured libbar/2.1.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: postpone failure for dependent libbaz unsatisfied with dependency libbar/2.1.0 (< 2.1.0)
+ trace: collect_build: pick libbar/2.1.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/2.1.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.1.0 with some other version
+ warning: package libbaz/2.0.0 dependency on (libbar < 2.1.0) is forcing downgrade of libbar/2.1.0 to 1.2.0
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/2.1.0 with 1.2.0 by adding package spec 'libbar == 1.2.0' to command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfox/1.1.0
+ trace: collect_build: add libbaz/2.0.0
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: begin libfox/1.1.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/2.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: collect_build_prerequisites: end libfox/1.1.0
+ trace: collect_build_prerequisites: begin libbaz/2.0.0
+ trace: collect_build: pick libbar/1.2.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.2.0 of dependent libbaz/2.0.0
+ trace: collect_build_prerequisites: end libbaz/2.0.0
+ trace: collect_build_prerequisites: resume libbar/1.2.0
+ trace: collect_build_prerequisites: end libbar/1.2.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbox/2.0.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_drop: add libbox
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbax/1.0.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_drop: overwrite libbox
+ trace: collect_drop: add libbax
+ trace: execute_plan: simulate: yes
+ %.*
+ drop libbax/1.0.0 (unused)
+ drop libbox/2.0.0 (unused)
+ downgrade libbar/1.2.0
+ new libfox/1.1.0
+ new libbaz/2.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -ar >>EOO;
+ !libbar configured 1.2.0 available 2.1.0
+ !libfox configured !1.1.0 available 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ !libbaz configured 2.0.0
+ !libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $pkg_drop libfox libbaz libbar
+ }
+
+ : unsatisfied-dependent
+ :
+  : Test that when a dependency cannot be upgraded to a later version
+  : because that version doesn't satisfy an existing dependent, the
+  : dependency is not upgraded.
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo 2>!;
+
+ $pkg_status -ar >>EOO;
+ libbar configured 1.2.0 available 2.1.0
+ !libfoo configured 2.0.0
+ libbar configured 1.2.0 available 2.1.0
+ EOO
+
+ $* libbar 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/2.1.0
+ trace: collect_build_prerequisites: skip unsatisfied existing dependent libfoo of dependency libbar/2.1.0 due to constraint (libbar == 1.2.0)
+ trace: collect_build_prerequisites: begin libbar/2.1.0
+ trace: collect_build: add libbox/2.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbox/2.0.0 of dependent libbar/2.1.0
+ trace: collect_build_prerequisites: begin libbox/2.0.0
+ trace: collect_build: add libbax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbax/1.0.0 of dependent libbox/2.0.0
+ trace: collect_build_prerequisites: begin libbax/1.0.0
+ trace: collect_build_prerequisites: end libbax/1.0.0
+ trace: collect_build_prerequisites: end libbox/2.0.0
+ trace: collect_build_prerequisites: end libbar/2.1.0
+ trace: collect_dependents: postpone failure for existing dependent libfoo unsatisfied with dependency libbar/2.1.0 (== 1.2.0)
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency libbar/2.1.0 with some other version
+ trace: try_replace_dependency: replace unsatisfactory dependency version libbar/2.1.0 with 1.2.0 by adding constraint 'libbar' -> 'libbar == 1.2.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.2.0
+ trace: collect_build_prerequisites: skip configured libbar/1.2.0
+ trace: execute_plan: simulate: yes
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop libfoo libbar
+ }
+ }
+ }
}
: upgrade
@@ -2591,7 +6126,20 @@ test.options += --no-progress
configured libbar/0.0.2
EOE
- clone_cfg = cp --no-cleanup -r ../cfg ./ &cfg/***
+ +$pkg_status -ar >>EOO
+ libfix configured 0.0.1 available 0.0.3
+ !libfoo configured !0.0.1 available 1.0.0
+ libfix configured 0.0.1 available 0.0.3
+ !libbaz configured !0.0.2 available 0.1.0 0.0.4 0.0.3
+ !libfoo configured !0.0.1 available 1.0.0
+ libfix configured 0.0.1 available 0.0.3
+ !libbar configured !0.0.2 available 1.0.0 0.0.3
+ !libbaz configured !0.0.2 available 0.1.0 0.0.4 0.0.3
+ !libfoo configured !0.0.1 available 1.0.0
+ libfix configured 0.0.1 available 0.0.3
+ EOO
+
+ clone_cfg = [cmdline] cp --no-cleanup -r ../cfg ./ &cfg/***
: immediate
:
@@ -2756,8 +6304,20233 @@ test.options += --no-progress
$* libbar --recursive --yes
}
+ : unavailable-masked
+ :
+ : As above but using --mask-repository* instead of rep-remove.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar --mask-repository $rep/t0a --mask-repository $rep/t0b \
+ --mask-repository-uuid "$cfg_uuid=($rep/t0c)" --recursive --yes
+ }
+
-$pkg_drop libbar libbaz libfoo
}
+
+ : alternative
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t8a && $rep_fetch
+
+ : multiple-dependencies
+ :
+ {
+ $clone_cfg;
+
+ $* foo --yes 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ configured libbaz/1.1.0
+ configured libbar/1.0.0
+ configured foo/1.0.0
+ %info: .+foo-1.0.0.+ is up to date%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_drop foo
+ }
+
+ : multiple-alts
+ :
+ {
+ +$clone_cfg
+
+ test.arguments += --yes --plan 'build plan:'
+
+ : ambiguity
+ :
+ {
+ $clone_cfg;
+
+ $* fox 2>>EOE != 0
+ error: unable to select dependency alternative for package fox/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar
+ info: alternative: libbaz
+ info: while satisfying fox/1.0.0
+ EOE
+ }
+
+ : reuse
+ :
+ {
+ +$clone_cfg
+
+ : specified-dep-build
+ :
+ {
+ $clone_cfg;
+
+ $* fox ?libbaz 2>>~%EOE%;
+ build plan:
+ new libbaz/1.1.0 (required by fox)
+ new fox/1.0.0
+ config.fox.backend=libbaz (set by fox)
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $pkg_drop fox
+ }
+
+ : hold-build
+ :
+ {
+ $clone_cfg;
+
+ $* fox libbaz 2>>~%EOE%;
+ build plan:
+ new libbaz/1.1.0
+ new fox/1.0.0
+ config.fox.backend=libbaz (set by fox)
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ %info: .+fox-1.0.0.+ is up to date%
+ updated libbaz/1.1.0
+ updated fox/1.0.0
+ EOE
+
+ $pkg_drop fox;
+ $pkg_drop libbaz
+ }
+
+ : configured
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz 2>!;
+
+ $* fox 2>>~%EOE%;
+ build plan:
+ new fox/1.0.0
+ config.fox.backend=libbaz (set by fox)
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $pkg_drop fox;
+ $pkg_drop libbaz
+ }
+
+ : fetched
+ :
+ {
+ $clone_cfg;
+
+ $pkg_fetch libbaz/1.0.0;
+
+ $* fox 2>>~%EOE%;
+ build plan:
+ update libbaz/1.0.0 (required by fox)
+ new fox/1.0.0
+ config.fox.backend=libbaz (set by fox)
+ unpacked libbaz/1.0.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.0.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $pkg_drop fox
+ }
+ }
+
+ : postpone
+ :
+ {
+ +$clone_cfg
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ $* fox foo 2>>~%EOE%;
+ build plan:
+ new libbaz/1.1.0 (required by foo)
+ new libbar/1.0.0 (required by foo, fox)
+ new fox/1.0.0
+ config.fox.backend=libbar (set by fox)
+ new foo/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ configured libbaz/1.1.0
+ configured libbar/1.0.0
+ configured fox/1.0.0
+ configured foo/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ %info: .+foo-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ updated foo/1.0.0
+ EOE
+
+ $pkg_drop fox;
+ $pkg_drop foo
+ }
+
+ : pick-libbaz
+ :
+ {
+ $clone_cfg;
+
+ $* baz fox bar 2>>~%EOE%;
+ build plan:
+ new libbaz/1.1.0 (required by baz)
+ new baz/1.0.0
+ new libbar/1.0.0 (required by bar, fox)
+ new fox/1.0.0
+ config.fox.backend=libbar (set by fox)
+ new bar/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured libbaz/1.1.0
+ configured baz/1.0.0
+ configured libbar/1.0.0
+ configured fox/1.0.0
+ configured bar/1.0.0
+ %info: .+baz-1.0.0.+ is up to date%
+ %info: .+fox-1.0.0.+ is up to date%
+ %info: .+bar-1.0.0.+ is up to date%
+ updated baz/1.0.0
+ updated fox/1.0.0
+ updated bar/1.0.0
+ EOE
+
+ cat cfg/fox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fox.backend = libbar
+ %.*
+ EOO
+
+ $pkg_drop fox;
+ $pkg_drop bar;
+ $pkg_drop baz
+ }
+ }
+
+ : reconfigure-dependent
+ :
+ {
+ $clone_cfg;
+
+ $* fox ?libbaz/1.0.0 2>>~%EOE%;
+ build plan:
+ new libbaz/1.0.0 (required by fox)
+ new fox/1.0.0
+ config.fox.backend=libbaz (set by fox)
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.0.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $* ?libbaz 2>>~%EOE%;
+ build plan:
+ upgrade libbaz/1.1.0
+ reconfigure fox (dependent of libbaz)
+ disfigured fox/1.0.0
+ disfigured libbaz/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ cat cfg/fox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fox.backend = libbaz
+ %.*
+ EOO
+
+ $pkg_drop fox
+ }
+
+ : recreate-decision
+ :
+ {
+ +$clone_cfg
+
+ : reevaluate-alts
+ :
+ {
+ +$clone_cfg
+
+ : preserve
+ :
+  : Test that the existing libbox dependency is preserved even though
+  : libbiz would be the preferred dependency alternative.
+ :
+ {
+ $clone_cfg;
+
+ $* box libbox 2>!;
+
+ $* box +{ config.box.extras=true } ?libbiz 2>>~%EOE%;
+ build plan:
+ reconfigure/update box/1.0.0
+ config.box.extras=true (user configuration)
+ config.box.backend=libbox (set by box)
+ disfigured box/1.0.0
+ configured box/1.0.0
+ %info: .+box-1.0.0.+ is up to date%
+ updated box/1.0.0
+ EOE
+
+ $* box +{ config.box.extras=false } libbiz 2>>~%EOE%;
+ build plan:
+ reconfigure/update box/1.0.0
+ config.box.extras=false (user configuration)
+ config.box.backend=libbox (set by box)
+ new libbiz/1.0.0
+ disfigured box/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ configured box/1.0.0
+ configured libbiz/1.0.0
+ %info: .+box-1.0.0.+ is up to date%
+ %info: .+libbiz-1.0.0.+ is up to date%
+ updated box/1.0.0
+ updated libbiz/1.0.0
+ EOE
+
+ $pkg_status -r box >>EOO;
+ !box configured 1.0.0
+ libbaz configured 1.1.0
+ !libbox configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbox
+ %.*
+ EOO
+
+ $pkg_drop box;
+ $pkg_drop libbox;
+ $pkg_drop libbiz
+ }
+
+ : change-downgraded-dependency
+ :
+ : Test that libbiz is selected as a dependency since the existing
+ : dependency decision cannot be preserved (libbox is downgraded to
+ : 0.1.0 and becomes unsatisfactory for box).
+ :
+ {
+ $clone_cfg;
+
+ $* box libbox 2>!;
+
+ $* box +{ config.box.extras=true } ?libbox/0.1.0 2>>~%EOE%;
+ build plan:
+ drop libbox/1.0.0 (unused)
+ new libbiz/1.0.0 (required by box)
+ reconfigure/update box/1.0.0
+ config.box.extras=true (user configuration)
+ config.box.backend=libbiz (set by box)
+ disfigured box/1.0.0
+ disfigured libbox/1.0.0
+ purged libbox/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ configured libbiz/1.0.0
+ configured box/1.0.0
+ %info: .+box-1.0.0.+ is up to date%
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r box >>EOO;
+ !box configured 1.0.0
+ libbaz configured 1.1.0
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbiz
+ %.*
+ EOO
+
+ $pkg_drop box
+ }
+
+ : change-downgraded-hold
+ :
+ : As above but libbox is downgraded to hold.
+ :
+ {
+ $clone_cfg;
+
+ $* box libbox 2>!;
+
+ # While at it, test the reused-only alternative selection mode.
+ # Also test the postponement of the 'unable to satisfy constraints
+ # on package' failure.
+ #
+ $* box +{ config.box.extras=true } libbox/0.1.0 2>>EOE != 0;
+ error: unable to satisfy constraints on package libbox
+ info: command line depends on (libbox == 0.1.0)
+ info: box/1.0.0 depends on (libbox >= 0.1.1)
+ info: available libbox/0.1.0
+ info: available libbox/1.0.0
+ info: while satisfying box/1.0.0
+ info: explicitly specify libbox version to manually satisfy both constraints
+ EOE
+
+ $* box +{ config.box.extras=true } libbox/0.1.0 ?libbiz 2>>~%EOE%;
+ build plan:
+ new libbiz/1.0.0 (required by box)
+ downgrade libbox/0.1.0
+ reconfigure/update box/1.0.0
+ config.box.extras=true (user configuration)
+ config.box.backend=libbiz (set by box)
+ disfigured box/1.0.0
+ disfigured libbox/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ fetched libbox/0.1.0
+ unpacked libbox/0.1.0
+ configured libbiz/1.0.0
+ configured libbox/0.1.0
+ configured box/1.0.0
+ %info: .+libbox-0.1.0.+ is up to date%
+ %info: .+box-1.0.0.+ is up to date%
+ updated libbox/0.1.0
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r box >>EOO;
+ !box configured 1.0.0
+ libbaz configured 1.1.0
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbiz
+ %.*
+ EOO
+
+ $pkg_drop box;
+ $pkg_drop libbox
+ }
+
+ : postpone-unable-satisfy
+ :
+ : Similar to the above, but this time the postponement of the
+ : 'unable to satisfy constraints on package' failure ends up with
+ : downgrading of the unsatisfied dependent (bax).
+ :
+ {
+ $clone_cfg;
+
+ $* bax libbox 2>!;
+
+ $* bax +{ config.bax.extras=true } libbox/0.1.0 bix 2>>~%EOE%;
+ build plan:
+ downgrade libbox/0.1.0
+ downgrade bax/0.1.0
+ config.bax.extras=true (user configuration)
+ config.bax.backend=libbox (set by bax)
+ new bix/1.0.0
+ disfigured bax/1.0.0
+ disfigured libbox/1.0.0
+ fetched libbox/0.1.0
+ unpacked libbox/0.1.0
+ fetched bax/0.1.0
+ unpacked bax/0.1.0
+ fetched bix/1.0.0
+ unpacked bix/1.0.0
+ configured libbox/0.1.0
+ configured bax/0.1.0
+ configured bix/1.0.0
+ %info: .+libbox-0.1.0.+ is up to date%
+ %info: .+bax-0.1.0.+ is up to date%
+ %info: .+bix-1.0.0.+ is up to date%
+ updated libbox/0.1.0
+ updated bax/0.1.0
+ updated bix/1.0.0
+ EOE
+
+ $pkg_drop bix;
+ $pkg_drop bax;
+ $pkg_drop libbox
+ }
+
+ : postpone-unable-satisfy-dep
+ :
+ : Similar to the above, but the failure postponement ends up with
+ : downgrading on the next dependency refinement iteration.
+ :
+ {
+ $clone_cfg;
+
+ $* bax libbox bux ?bix/0.1.0 2>!;
+
+ $* bax +{ config.bax.extras=true } libbox/0.1.0 ?bix 2>>~%EOE%;
+ build plan:
+ downgrade libbox/0.1.0
+ downgrade bax/0.1.0
+ config.bax.extras=true (user configuration)
+ config.bax.backend=libbox (set by bax)
+ upgrade bix/1.0.0
+ reconfigure bux (dependent of bix)
+ disfigured bux/1.0.0
+ disfigured bix/0.1.0
+ disfigured bax/1.0.0
+ disfigured libbox/1.0.0
+ fetched libbox/0.1.0
+ unpacked libbox/0.1.0
+ fetched bax/0.1.0
+ unpacked bax/0.1.0
+ fetched bix/1.0.0
+ unpacked bix/1.0.0
+ configured libbox/0.1.0
+ configured bax/0.1.0
+ configured bix/1.0.0
+ configured bux/1.0.0
+ %info: .+libbox-0.1.0.+ is up to date%
+ %info: .+bax-0.1.0.+ is up to date%
+ %info: .+bux-1.0.0.+ is up to date%
+ updated libbox/0.1.0
+ updated bax/0.1.0
+ updated bux/1.0.0
+ EOE
+
+ $pkg_drop bux;
+ $pkg_drop bax;
+ $pkg_drop libbox
+ }
+ }
+
+ : reconfigure
+ :
+ {
+ $clone_cfg;
+
+ $* box ?libbiz/0.1.0 2>>~%EOE%;
+ build plan:
+ new libbox/1.0.0 (required by box)
+ new libbaz/1.1.0 (required by box)
+ new box/1.0.0
+ config.box.backend=libbox (set by box)
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched box/1.0.0
+ unpacked box/1.0.0
+ configured libbox/1.0.0
+ configured libbaz/1.1.0
+ configured box/1.0.0
+ %info: .+box-1.0.0.+ is up to date%
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !box configured 1.0.0
+ libbaz configured 1.1.0
+ libbox configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbox
+ %.*
+ EOO
+
+ # Downgrade libbaz to reconfigure box and make sure we still keep
+ # libbox as a prerequisite of box.
+ #
+ $* libbiz ?libbaz/1.0.0 2>>~%EOE%;
+ build plan:
+ new libbiz/1.0.0
+ downgrade libbaz/1.0.0
+ reconfigure box (dependent of libbaz)
+ disfigured box/1.0.0
+ disfigured libbaz/1.1.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ configured libbiz/1.0.0
+ configured libbaz/1.0.0
+ configured box/1.0.0
+ %info: .+libbiz-1.0.0.+ is up to date%
+ %info: .+box-1.0.0.+ is up to date%
+ updated libbiz/1.0.0
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !box configured 1.0.0
+ libbaz configured !1.0.0 available 1.1.0
+ libbox configured 1.0.0
+ !libbiz configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbox
+ %.*
+ EOO
+
+  # Make sure the decision also holds for the downgraded dependency.
+ #
+ $* ?libbox/0.1.1 2>>~%EOE%;
+ build plan:
+ downgrade libbox/0.1.1
+ reconfigure box (dependent of libbox)
+ disfigured box/1.0.0
+ disfigured libbox/1.0.0
+ fetched libbox/0.1.1
+ unpacked libbox/0.1.1
+ configured libbox/0.1.1
+ configured box/1.0.0
+ %info: .+box-1.0.0.+ is up to date%
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !box configured 1.0.0
+ libbaz configured !1.0.0 available 1.1.0
+ libbox configured !0.1.1 available 1.0.0
+ !libbiz configured 1.0.0
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.backend = libbox
+ %.*
+ EOO
+
+ $pkg_drop box;
+ $pkg_drop libbiz
+ }
+ }
+ }
+
+ : enable-condition
+ :
+ {
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+ +$rep_add $rep/t8a && $rep_fetch
+
+ test.arguments += --yes
+
+ backend = ($posix ? 'libbaz' : 'libbar')
+ backend_dep = ($posix ? 'libbaz/1.1.0' : 'libbar/1.0.0')
+ backend_configured = ($posix ? 'libbaz configured 1.1.0' : 'libbar configured 1.0.0')
+
+ : cxx-target
+ :
+ {
+ $clone_cfg;
+
+ $* fax 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched fax/1.0.0
+ unpacked fax/1.0.0
+ configured $backend_dep
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ EOO
+
+ cat cfg/fax-1.0.0/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = false
+ %.*
+ EOO
+
+ $pkg_drop fax
+ }
+
+ : config-var
+ :
+ {
+ $clone_cfg;
+
+ $* config.fax.libbiz=true -- fax 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ fetched fax/1.0.0
+ unpacked fax/1.0.0
+ configured $backend_dep
+ configured libbiz/1.0.0
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/fax-1.0.0/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = true
+ %.*
+ EOO
+
+ $pkg_drop fax
+ }
+
+ : enable-indirect-dependency
+ :
+ {
+ $clone_cfg;
+
+ test.arguments += --plan "";
+
+ $* dax 2>>~%EOE%;
+ new libbaz/1.1.0 (required by dax)
+ new dax/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched dax/1.0.0
+ unpacked dax/1.0.0
+ configured libbaz/1.1.0
+ configured dax/1.0.0
+ %info: .+dax-1.0.0.+ is up to date%
+ updated dax/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dax configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $* dix 2>>~%EOE%;
+ new libbar/1.0.0 (required by dax)
+ reconfigure/update dax/1.0.0 (required by dix)
+ config.dax.extras=true (set by dix)
+ new dix/1.0.0
+ disfigured dax/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched dix/1.0.0
+ unpacked dix/1.0.0
+ configured libbar/1.0.0
+ configured dax/1.0.0
+ configured dix/1.0.0
+ %info: .+dix-1.0.0.+ is up to date%
+ updated dix/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dax configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.1.0
+ !dix configured 1.0.0
+ !dax configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_drop dax dix
+ }
+
+ : repoint-enable-indirect-dependency
+ :
+ {
+ $clone_cfg;
+
+ test.arguments += --plan "";
+
+ $* dax dux ?dix/0.1.0 2>>~%EOE%;
+ new libbaz/1.1.0 (required by dax)
+ new dax/1.0.0
+ new dix/0.1.0 (required by dux)
+ new dux/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched dax/1.0.0
+ unpacked dax/1.0.0
+ fetched dix/0.1.0
+ unpacked dix/0.1.0
+ fetched dux/1.0.0
+ unpacked dux/1.0.0
+ configured libbaz/1.1.0
+ configured dax/1.0.0
+ configured dix/0.1.0
+ configured dux/1.0.0
+ %info: .+dax-1.0.0.+ is up to date%
+ %info: .+dux-1.0.0.+ is up to date%
+ updated dax/1.0.0
+ updated dux/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dax configured 1.0.0
+ libbaz configured 1.1.0
+ !dux configured 1.0.0
+ dix configured !0.1.0 available 1.0.0
+ EOO
+
+ $cfg_create -d cfg2 --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+ $rep_add -d cfg2 $rep/t8a && $rep_fetch -d cfg2;
+
+ $* libbaz +{ --config-name cfg2 } ?dix 2>>~%EOE%;
+ % new libbaz/1.1.0 \[cfg2.\]%
+ drop libbaz/1.1.0 (unused)
+ new libbar/1.0.0 (required by dax)
+ reconfigure/update dax/1.0.0 (required by dix)
+ config.dax.extras=true (set by dix)
+ upgrade dix/1.0.0
+ reconfigure dux (dependent of dix)
+ disfigured dux/1.0.0
+ disfigured dix/0.1.0
+ disfigured dax/1.0.0
+ disfigured libbaz/1.1.0
+ %fetched libbaz/1.1.0 \[cfg2.\]%
+ %unpacked libbaz/1.1.0 \[cfg2.\]%
+ purged libbaz/1.1.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched dix/1.0.0
+ unpacked dix/1.0.0
+ %configured libbaz/1.1.0 \[cfg2.\]%
+ configured libbar/1.0.0
+ configured dax/1.0.0
+ configured dix/1.0.0
+ configured dux/1.0.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ %info: .+dux-1.0.0.+ is up to date%
+ %updated libbaz/1.1.0 \[cfg2.\]%
+ updated dux/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !dax configured 1.0.0
+ libbar configured 1.0.0
+ !libbaz [cfg2/] configured 1.1.0
+ !dux configured 1.0.0
+ dix configured 1.0.0
+ !dax configured 1.0.0
+ libbar configured 1.0.0
+ !libbaz [cfg2/] configured 1.1.0
+ EOO
+
+ $pkg_drop dux;
+ $pkg_drop dax
+ }
+
+ : reevaluate-alternatives
+ :
+ {
+ +$clone_cfg
+
+ : add-dependency
+ :
+ {
+ $clone_cfg;
+
+ $* fax 2>!;
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ EOO
+
+ $* config.fax.libbiz=true -- fax 2>>~%EOE%;
+ disfigured fax/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ configured libbiz/1.0.0
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ libbiz configured 1.0.0
+ EOO
+
+  # While at it, test that the dependency is properly removed.
+ #
+ $* config.fax.libbiz=false -- fax 2>>~%EOE%;
+ disfigured fax/1.0.0
+ disfigured libbiz/1.0.0
+ purged libbiz/1.0.0
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ EOO
+
+ $pkg_drop fax
+ }
+
+ : downgrade-dependency
+ :
+ {
+ $clone_cfg;
+
+ $* fux 2>!;
+
+ $pkg_status -r >>"EOO";
+ !fux configured 1.0.0
+ libbiz configured 1.0.0
+ EOO
+
+ $* config.fux.libbiz_old=true -- fux 2>>~%EOE%;
+ disfigured fux/1.0.0
+ disfigured libbiz/1.0.0
+ fetched libbiz/0.1.0
+ unpacked libbiz/0.1.0
+ configured libbiz/0.1.0
+ configured fux/1.0.0
+ %info: .+fux-1.0.0.+ is up to date%
+ updated fux/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fux configured 1.0.0
+ libbiz configured 0.1.0 available 1.0.0
+ EOO
+
+ # While at it, test that the dependency is properly upgraded.
+ #
+ # Note that, unless requested, libbiz is not upgraded, since 0.1.0
+ # is still good for the selected alternative.
+ #
+ $* config.fux.libbiz_old=false -- fux 2>>~%EOE%;
+ disfigured fux/1.0.0
+ configured fux/1.0.0
+ %info: .+fux-1.0.0.+ is up to date%
+ updated fux/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fux configured 1.0.0
+ libbiz configured 0.1.0 available 1.0.0
+ EOO
+
+ $* fux +{ config.fux.libbiz_old=false } ?libbiz 2>>~%EOE%;
+ disfigured fux/1.0.0
+ disfigured libbiz/0.1.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ configured libbiz/1.0.0
+ configured fux/1.0.0
+ %info: .+fux-1.0.0.+ is up to date%
+ updated fux/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fux configured 1.0.0
+ libbiz configured 1.0.0
+ EOO
+
+ $pkg_drop fux
+ }
+ }
+
+ : external-package
+ :
+ if! $remote
+ {
+ +$clone_cfg
+
+ +$tar -xzf $src/t8a/fax-1.0.0.tar.gz &fax-1.0.0/***
+ +mv fax-1.0.0 fax
+
+ : change-manifest
+ :
+ {
+ $clone_cfg;
+ cp -rp ../fax/ ./;
+
+ $* config.fax.libbiz=true -- fax/ 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ using fax/1.0.0 \(external\)
+ configured $backend_dep
+ configured libbiz/1.0.0
+ configured fax/1.0.0
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0
+ $backend_configured
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = true
+ %.*
+ EOO
+
+ # Upgrade the external package after changing its manifest and make
+ # sure the configuration is preserved.
+ #
+ echo '' >+fax/manifest;
+
+ $* fax/ 2>>~%EOE%;
+ disfigured fax/1.0.0
+ using fax/1.0.0#1 (external)
+ configured fax/1.0.0#1
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#1
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0#1
+ $backend_configured
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = true
+ %.*
+ EOO
+
+ # While at it, test that it's ok for the out root directory to not
+ # exist.
+ #
+ # Note that this testing is only meaningful when we replace an
+ # external package with another external (see
+ # build_package::external() for details).
+ #
+ echo '' >+fax/manifest;
+
+ rm -r cfg/fax/;
+
+ $* fax/ 2>>~%EOE%;
+ disfigured fax/1.0.0#1
+ disfigured libbiz/1.0.0
+ purged libbiz/1.0.0
+ using fax/1.0.0#2 (external)
+ configured fax/1.0.0#2
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#2
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0#2
+ $backend_configured
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = false
+ %.*
+ EOO
+
+ # Also tests that the depends value location is printed on the
+ # enable condition evaluation failure for an external package.
+ #
+ sed -i -e 's/(depends: libbiz).+/\1 ? (config.fax.libbiz = true)/' fax/manifest;
+
+ $* fax/ 2>>~%EOE% != 0;
+ <depends-enable-clause>:1: error: invalid bool value: multiple names
+ info: enable condition: (config.fax.libbiz = true)
+ % fax.manifest:10:10: info: in depends manifest value defined here%
+ info: while satisfying fax/1.0.0#3
+ EOE
+
+ $pkg_drop fax
+ }
+
+ : change-buildfile
+ :
+ {
+ +$clone_cfg
+
+ : package-directory
+ :
+ {
+ $clone_cfg;
+ cp -rp ../../fax/ ./;
+
+ $* fax 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched fax/1.0.0
+ unpacked fax/1.0.0
+ configured $backend_dep
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ EOO
+
+ # Iteration increment and upgrade after turning a package from the
+ # archive-based repo into an external package.
+ #
+ $* fax/ 2>>~%EOE%;
+ disfigured fax/1.0.0
+ using fax/1.0.0#1 (external)
+ configured fax/1.0.0#1
+ %info: .+dir.fax.+ is up to date%
+ updated fax/1.0.0#1
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0#1
+ $backend_configured
+ EOO
+
+ # Further upgrade after the package's buildfile is edited.
+ #
+ echo '' >+fax/build/root.build;
+
+ $* fax/ 2>>~%EOE%;
+ disfigured fax/1.0.0#1
+ using fax/1.0.0#2 (external)
+ configured fax/1.0.0#2
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#2
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0#2
+ $backend_configured
+ EOO
+
+ # No upgrade if the buildfile is not edited.
+ #
+ $* fax/ 2>>~%EOE%;
+ disfigured fax/1.0.0#2
+ using fax/1.0.0#2 (external)
+ configured fax/1.0.0#2
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#2
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0#2
+ $backend_configured
+ EOO
+
+ $pkg_drop fax
+ }
+
+ : directory-repo
+ :
+ {
+ $clone_cfg;
+ cp -rp ../../fax/ ./;
+
+ $* fax 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched fax/1.0.0
+ unpacked fax/1.0.0
+ configured $backend_dep
+ configured fax/1.0.0
+ %info: .+fax-1.0.0.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0
+ $backend_configured
+ EOO
+
+ # Iteration increment and upgrade after turning a package from the
+ # archive-based repo into an external package.
+ #
+ $rep_add --type dir fax/ && $rep_fetch;
+
+ $* fax 2>>~%EOE%;
+ disfigured fax/1.0.0
+ using fax/1.0.0#1 (external)
+ configured fax/1.0.0#1
+ %info: .+dir.fax.+ is up to date%
+ updated fax/1.0.0#1
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0#1
+ $backend_configured
+ EOO
+
+ # Further upgrade after the package's buildfile is edited.
+ #
+ echo '' >+fax/build/root.build;
+
+ $rep_fetch;
+
+ $* fax 2>>~%EOE%;
+ disfigured fax/1.0.0#1
+ using fax/1.0.0#2 (external)
+ configured fax/1.0.0#2
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#2
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0#2
+ $backend_configured
+ EOO
+
+ # No upgrade if the buildfile is not edited.
+ #
+ $rep_fetch;
+
+ $* fax 2>>~%EOE%;
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0#2
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured 1.0.0#2
+ $backend_configured
+ EOO
+
+ $pkg_drop fax
+ }
+ }
+
+ : reconfigure-reflect-vars
+ :
+ {
+ $clone_cfg;
+ cp -rp ../fax/ ./;
+
+ $* config.fax.libbiz=true -- fax/ 2>>~"%EOE%";
+ fetched $backend_dep
+ unpacked $backend_dep
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ using fax/1.0.0 \(external\)
+ configured $backend_dep
+ configured libbiz/1.0.0
+ configured fax/1.0.0
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>"EOO";
+ !fax configured !1.0.0
+ $backend_configured
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = true
+ %config.fax.extras = '.+'%
+ config.fax.libbox = false
+ EOO
+
+ # While at it, make sure none of the reflect variables are
+ # unexpectedly wiped out on reconfiguration due to the dependency
+ # upgrade.
+ #
+ $* fax/ "?sys:$backend/*" 2>>~"%EOE%";
+ disfigured fax/1.0.0
+ %disfigured $backend/.+%
+ %purged $backend/.+%
+ using fax/1.0.0 \(external\)
+ configured sys:$backend/*
+ configured fax/1.0.0
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>~"%EOO%";
+ !fax configured !1.0.0
+ % $backend configured,system .+%
+ libbiz configured 1.0.0
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = true
+ %config.fax.extras = '.+'%
+ config.fax.libbox = false
+ EOO
+
+ # Now make sure that dependency clauses re-evaluation is properly
+ # reflected in the configuration.
+ #
+ $* config.fax.libbiz=false -- fax/ 2>>~"%EOE%";
+ disfigured fax/1.0.0
+ disfigured libbiz/1.0.0
+ purged libbiz/1.0.0
+ using fax/1.0.0 \(external\)
+ configured fax/1.0.0
+ %info: .+fax.+ is up to date%
+ updated fax/1.0.0
+ EOE
+
+ $pkg_status -r >>~"%EOO%";
+ !fax configured !1.0.0
+ % $backend configured,system .+%
+ EOO
+
+ cat cfg/fax/build/config.build >>~"%EOO%";
+ %.*
+ config.fax.backend = $backend
+ config.fax.libbiz = false
+ config.fax.extras = [null]
+ config.fax.libbox = false
+ EOO
+
+ $pkg_drop fax
+ }
+ }
+
+ : evaluate-reflect-vars
+ :
+ {
+ $clone_cfg;
+
+ $* config.fax.libbox=true config.fax.libbiz=true -- fax 2>!;
+
+ if $posix
+ $pkg_status -r >>EOO
+ !fax configured 1.0.0
+ libbaz configured 1.1.0
+ libbiz configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+ else
+ $pkg_status -r >>EOO
+ !fax configured 1.0.0
+ libbar configured 1.0.0
+ libbiz configured 1.0.0
+ EOO
+ end;
+
+ $pkg_drop fax
+ }
+ }
+
+ : reconfigure-dependent
+ :
+ : Test some cases when a dependent needs to be reconfigured due to an
+ : upgraded dependency.
+ :
+ {
+ +$clone_cfg
+
+ test.arguments += --yes
+
+ : keep-alternative
+ :
+ {
+ $clone_cfg;
+
+ $* tax ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ $* --upgrade --recursive 2>!; # Noop.
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ $* ?libfoo 2>!; # Noop.
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ $* ?libfoo/2.0.0 2>>EOE != 0;
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/2.0.0 doesn't satisfy tax/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ $pkg_drop tax
+ }
+
+ : re-evaluate-dependent
+ :
+ {
+ $clone_cfg;
+
+ $* tex ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ cat cfg/tex-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tex.libfoo_protocol = 1
+ %.*
+ EOO
+
+ # @@ Strangely, if upgrade with -ur instead of ?libfoo, then status
+ # prints 'libfoo configured !2.0.0' instead of
+ # 'libfoo configured 2.0.0'.
+ #
+ $* ?libfoo 2>>~%EOE%;
+ disfigured tex/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured tex/1.0.0
+ %info: .+tex-1.0.0.+ is up to date%
+ updated tex/1.0.0
+ EOE
+
+ cat cfg/tex-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tex.libfoo_protocol = 2
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ $pkg_drop tex
+ }
+
+ : re-evaluate-reflect
+ :
+ {
+ $clone_cfg;
+
+ # @@ The fact that `$* tix` fails as follows but `$* tix ?libfoo`
+ # doesn't looks confusing:
+ #
+ # error: unable to select dependency alternative for package tix/1.0.0
+ # info: explicitly specify dependency packages to manually select the alternative
+ # info: alternative: libfoo
+ # info: alternative: libfoo
+ # info: while satisfying tix/1.0.0
+ #
+ # Note:
+ #
+ # tix -> libfoo>=2.0.0 reflect{...} | libfoo>=1.0.0 reflect{...}
+ #
+ $* tix ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tix configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ cat cfg/tix-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tix.reflect = 1
+ %.*
+ EOO
+
+ $* ?libfoo 2>>~%EOE%;
+ disfigured tix/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured tix/1.0.0
+ %info: .+tix-1.0.0.+ is up to date%
+ updated tix/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tix configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/tix-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tix.reflect = 2
+ %.*
+ EOO
+
+ $pkg_drop tix
+ }
+
+ : select-alt-with-reflect
+ :
+ {
+ $clone_cfg;
+
+ $* tox ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tox configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ cat cfg/tox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tox.libfoo_protocol = '1 or 2'
+ %.*
+ EOO
+
+ $* ?libfoo 2>>~%EOE%;
+ disfigured tox/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured tox/1.0.0
+ %info: .+tox-1.0.0.+ is up to date%
+ updated tox/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tox configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/tox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tox.libfoo_protocol = 2
+ %.*
+ EOO
+
+ $pkg_drop tox
+ }
+
+ : re-evaluate-from
+ :
+ {
+ +$clone_cfg
+
+ : earlier-depends
+ :
+ {
+ $clone_cfg;
+
+ $* tux ?libbox/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tux configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0 0.1.1
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 1
+ %.*
+ EOO
+
+ $* ?libbox 2>>~%EOE%;
+ disfigured tux/1.0.0
+ disfigured libbox/0.1.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ configured libbox/1.0.0
+ configured tux/1.0.0
+ %info: .+tux-1.0.0.+ is up to date%
+ updated tux/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tux configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 1
+ %.*
+ EOO
+
+ $pkg_drop tux
+ }
+
+ : later-depends
+ :
+ {
+ $clone_cfg;
+
+ $* twx ?libbiz/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !twx configured 1.0.0
+ libbiz configured !0.1.0 available 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 1
+ %.*
+ EOO
+
+ $* ?libbiz 2>>~%EOE%;
+ disfigured twx/1.0.0
+ disfigured libbiz/0.1.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ configured libbiz/1.0.0
+ configured twx/1.0.0
+ %info: .+twx-1.0.0.+ is up to date%
+ updated twx/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !twx configured 1.0.0
+ libbiz configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/libfoo-2.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.protocol = 1
+ %.*
+ EOO
+
+ $pkg_drop twx
+ }
+
+ : same-depends
+ :
+ {
+ $clone_cfg;
+
+ $* tvx ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tvx configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ libfox configured 1.0.0
+ EOO
+
+ cat cfg/libfox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfox.level = 1
+ %.*
+ EOO
+
+ $* ?libfoo 2>>~%EOE%;
+ disfigured tvx/1.0.0
+ disfigured libfoo/1.0.0
+ disfigured libfox/1.0.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libfox/1.0.0
+ configured libfoo/2.0.0
+ configured tvx/1.0.0
+ %info: .+tvx-1.0.0.+ is up to date%
+ updated tvx/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tvx configured 1.0.0
+ libfoo configured 2.0.0
+ libfox configured 1.0.0
+ EOO
+
+ cat cfg/libfox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfox.level = 2
+ %.*
+ EOO
+
+ $pkg_drop tvx
+ }
+ }
+
+ : change-alternative
+ :
+ {
+ $clone_cfg;
+
+ $* tpx ?libfoo/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tpx configured 1.0.0
+ libfoo configured !1.0.0 available 2.0.0
+ EOO
+
+ cat cfg/tpx-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tpx.libfoo_protocol = 1
+ %.*
+ EOO
+
+ $* ?libfoo 2>>~%EOE%;
+ disfigured tpx/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/2.0.0
+ unpacked libfoo/2.0.0
+ configured libfoo/2.0.0
+ configured tpx/1.0.0
+ %info: .+tpx-1.0.0.+ is up to date%
+ updated tpx/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tpx configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/tpx-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tpx.libfoo_protocol = 2
+ %.*
+ EOO
+
+ $pkg_drop tpx
+ }
+
+ : fail-change-unsatisfactory-alternative
+ :
+ {
+ $clone_cfg;
+
+ $* tpx ?libfoo 2>!;
+
+ $pkg_status -r >>EOO;
+ !tpx configured 1.0.0
+ libfoo configured 2.0.0
+ EOO
+
+ cat cfg/tpx-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.tpx.libfoo_protocol = 2
+ %.*
+ EOO
+
+ $* tax 2>>EOE != 0;
+ error: unable to downgrade package libfoo/2.0.0 to 1.0.0
+ info: because configured package tpx/1.0.0 depends on (libfoo >= 2.0.0)
+ info: package libfoo/1.0.0 required by
+ tax/1.0.0 (libfoo == 1.0.0)
+ info: re-run with -v for additional dependency information
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package tpx
+ info: or explicitly specify package libfoo version to manually satisfy these constraints
+ EOE
+
+ # @@ Note that the above advice doesn't really work here since the
+ # tpx package is not re-collected recursively. We should probably
+ # invent the package-specific --rebuild option to re-collect a
+ # configured package.
+ #
+ $* tax tpx ?libfoo/1.0.0 2>>EOE != 0;
+ error: unable to downgrade package libfoo/2.0.0 to 1.0.0
+ info: because configured package tpx/1.0.0 depends on (libfoo >= 2.0.0)
+ info: re-run with -v for additional dependency information
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package tpx
+ info: or explicitly specify package libfoo version to manually satisfy these constraints
+ EOE
+
+ $pkg_drop tpx
+ }
+ }
+ }
+
+ : version-replacement
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t12b && $rep_fetch
+
+ test.arguments += --yes
+
+ : not-replaced
+ :
+ {
+ $clone_cfg;
+
+ $* bar foo 2>!;
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libbaz configured 1.0.0
+ !foo configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop bar foo
+ }
+
+ : replaced-scratch
+ :
+ : Test that changing package order on the command line does not result
+ : in a sub-optimal choice of the libbaz version (0.1.0).
+ :
+ : Note that this was not the case until we implemented the builds
+ : re-collection on the package version change.
+ :
+ {
+ $clone_cfg;
+
+ $* foo bar --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ %.*
+ trace: collect_build: add libbaz/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.1.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: collect_build: libbar/1.0.0 package version needs to be replaced with libbar/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: apply version replacement for libbar/1.0.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent foo/1.0.0
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.0.0 of dependent libbar/0.1.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/0.1.0 of dependent bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libbaz configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop foo bar
+ }
+
+ : replaced-inplace
+ :
+ : Test the version replacement optimization. Here libbaz/1.0.0 get
+ : replaced with 0.1.0 but without re-collection from scratch since it
+ : does not have any dependencies.
+ :
+ : Note that the inplace replacement has been disabled for now (see
+ : build_packages::collect_build() for details).
+ :
+ {
+ $clone_cfg && $rep_add $rep/t12a && $rep_fetch;
+
+#\
+ $* libbaz libbar --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ %.*
+ trace: collect_build: pick libbaz/0.1.0 over libbaz/1.0.0
+ trace: collect_build: libbaz/1.0.0 package version needs to be replaced in-place with libbaz/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.1.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+#\
+
+ $* libbaz libbar --verbose 5 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ %.*
+ trace: collect_build: pick libbaz/0.1.0 over libbaz/1.0.0
+ trace: collect_build: libbaz/1.0.0 package version needs to be replaced with libbaz/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: apply version replacement for libbaz/1.0.0
+ trace: collect_build: replacement: libbaz/0.1.0
+ trace: collect_build: add libbaz/0.1.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: begin libbaz/0.1.0
+ trace: collect_build_prerequisites: end libbaz/0.1.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/0.1.0 of dependent libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbaz configured 0.1.0 available 1.0.0
+ !libbar configured 1.0.0
+ !libbaz configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop libbaz libbar
+ }
+ }
+
+ : drop-dependent
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t12b && $rep_fetch
+
+ test.arguments += --yes
+
+ : unhold
+ :
+ : Test that the dependent being dropped does not constrain a dependency
+ : anymore.
+ :
+ {
+ $clone_cfg;
+
+ $* libbar 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ libbaz configured 0.1.0 available [1.0.0]
+ EOO
+
+ $* baz/0.1.0 ?libbar ?libbaz 2>!;
+
+ $pkg_status -r >>EOO;
+ !baz configured !0.1.0 available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : unuse
+ :
+ : Unlike the previous test, at the time we check the constraint applied
+ : by libbar on libbaz (== 0.1.0) there is no evidence that libbar will be
+ : dropped, which will happen some later execution plan refinement
+ : iteration.
+ :
+ {
+ $clone_cfg;
+
+ $* foo 2>!;
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 0.1.0 available [1.0.0]
+ EOO
+
+ $* baz foo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !foo configured !0.1.0 available 1.0.0
+ !baz configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop baz foo
+ }
+ }
+
+ : config-negotiation-order
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t11a && $rep_fetch
+
+ test.arguments += --yes --plan='build plan:' --verbose 5 --build-option --quiet
+
+ # Note that on some platforms matching bpkg's stderr using a regular
+ # expression which contains too may '%.*' lines ends up with the
+ # regex_error exception with the error_complexity code. To fix that we
+ # pipe bpkg's stderr through the sed-based pipeline filtering the stream
+ # content and simplify the stderr-matching regular expressions.
+ #
+ filter = [cmdline] sed -e "'"'s/^mkdir -p .*//'"'" | \
+ sed -n -e "'"'s/(.+)/\1/p'"'" >&2
+
+ : initial-collection
+ :
+ {
+ +$clone_cfg
+
+ : postpone
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # foo: depends: libfoo(c)
+ #
+ # fox: depends: libfoo(c)
+ #
+ # fux: depends: libfoo
+ #
+ $* foo fox fux 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/1.0.0
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/1.0.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone fox/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0 since already in cluster {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fox/1.0.0
+ trace: collect_build_prerequisites: resume fox/1.0.0
+ trace: collect_build_prerequisites: end fox/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo fox | libfoo->{foo/1,1 fox/1,1}}!
+ trace: collect_build_postponed (1): end {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo, fox, fux)
+ config.libfoo.extras=true (set by foo)
+ new foo/1.0.0
+ config.foo.libfoo_extras=true (set by foo)
+ new fox/1.0.0
+ config.fox.libfoo_extras=true (set by fox)
+ new fux/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured 1.0.0
+ libfoo configured 1.0.0
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo fox fux
+ }
+
+ : postpone-system
+ :
+ {
+ $clone_cfg;
+
+ $* foo fox '?sys:libfoo/*' 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency sys:libfoo/* of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency sys:libfoo/* of dependent fox/1.0.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone fox/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip system sys:libfoo/*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fox/1.0.0
+ trace: collect_build_prerequisites: resume fox/1.0.0
+ trace: collect_build_prerequisites: end fox/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo fox | libfoo->{foo/1,1 fox/1,1}}!
+ trace: collect_build_postponed (1): end {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ configure sys:libfoo/* (required by foo, fox)
+ config.libfoo.extras=true (expected by foo)
+ new foo/1.0.0
+ config.foo.libfoo_extras=true (set by foo)
+ new fox/1.0.0
+ config.fox.libfoo_extras=true (set by fox)
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured,system !* available 1.0.0 0.1.0
+ !fox configured 1.0.0
+ libfoo configured,system !* available 1.0.0 0.1.0
+ EOO
+
+ $pkg_drop foo fox
+ }
+
+ : postpone-merge
+ :
+ {
+ $clone_cfg;
+
+ $* foo bar baz 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: create {bar | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_prerequisites: begin baz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent baz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent baz/1.0.0
+ trace: postponed_configurations::add: add {baz 1,1: libbar libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: postponed_configurations::add: merge {bar | libbar->{bar/1,1}} into {baz foo | libfoo->{baz/1,1 foo/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar baz foo | libfoo->{baz/1,1 foo/1,1} libbar->{bar/1,1 baz/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar baz foo | libfoo->{baz/1,1 foo/1,1} libbar->{bar/1,1 baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar baz foo | libfoo->{baz/1,1 foo/1,1} libbar->{bar/1,1 baz/1,1}}!
+ trace: collect_build_postponed (1): end {bar baz foo | libfoo->{baz/1,1 foo/1,1} libbar->{bar/1,1 baz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by baz, foo)
+ config.libfoo.extras=true (set by baz)
+ new foo/1.0.0
+ config.foo.libfoo_extras=true (set by foo)
+ new libbar/1.0.0 (required by bar, baz)
+ config.libbar.extras=true (set by bar)
+ new bar/1.0.0
+ new baz/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo bar baz
+ }
+
+ : postpone-dependency-dependent
+ :
+ {
+ $clone_cfg;
+
+ $* fex 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fex/1.0.0
+ trace: collect_build_prerequisites: begin fex/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency foo/1.0.0 of dependent fex/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fex/1.0.0
+ trace: postponed_configurations::add: add {fex 2,1: libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone fex/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fex foo | libfoo->{fex/2,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fex foo | libfoo->{fex/2,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fex/1.0.0
+ trace: collect_build_prerequisites: resume fex/1.0.0
+ trace: collect_build_prerequisites: end fex/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fex foo | libfoo->{fex/2,1 foo/1,1}}!
+ trace: collect_build_postponed (1): end {fex foo | libfoo->{fex/2,1 foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fex configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop fex
+ }
+
+ : reconf-existing-dept
+ :
+ {
+ +$clone_cfg
+
+ : no-build-clause
+ :
+ {
+ +$clone_cfg
+
+ : no-config
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fex: depends: fux(c)
+ #
+ # fux: depends: libfoo
+ #
+ $* fex/0.1.0 fux libfoo 2>!;
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $* libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $pkg_drop fex fux libfoo
+ }
+
+ : dept-upgrade
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fux: depends: libfoo ?
+ #
+ $* fux/0.1.0 +{ config.fux.extras=true } 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $* fux/0.1.1 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.1.1 available 1.0.0 0.2.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-0.1.1/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $pkg_drop fux
+ }
+
+ : dept-depc
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fux: depends: libfoo
+ #
+ $* fux +{ config.fux.extras=true } ?libfoo +{ config.libfoo.extras=true } 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ config.libfoo.network = false
+ %.*
+ EOO
+
+ $* ?libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ EOO
+
+ $pkg_drop fux
+ }
+
+ : dept-depc-no-plan
+ :
+ : As above but the configuration is not printed (as a part of the
+ : plan) before the plan execution.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fux: depends: libfoo
+ #
+ $* fux +{ config.fux.extras=true } ?libfoo +{ config.libfoo.extras=true } 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ config.libfoo.network = false
+        %.*
+        EOO
+
+ test.arguments = $regex.filter_out_match($test.arguments, '--plan=.*');
+
+ $* ?libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ EOO
+
+ $pkg_drop fux
+ }
+ }
+
+ : enabled-clause
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fex: depends: fux(c)
+ #
+ # fux: depends: libfoo ?
+ #
+ $* fex/0.1.0 fux/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $* ?libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ $pkg_drop fex fux
+ }
+
+ : require-clause
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fex: depends: fux(c)
+ #
+ # fux: depends: libfoo(c) ?
+ #
+ $* fex/0.1.0 fux/0.2.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-0.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ config.libfoo.network = false
+ %.*
+ EOO
+
+ $* ?libfoo/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libfoo/1.0.0: update to libfoo/0.1.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of existing dependent fux/0.2.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add fux/0.2.0
+ trace: postponed_configurations::add: create {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: reeval fux/0.2.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: postponed_configurations::add: add {fux^ 1,1: libfoo} to {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent fux/0.2.0 results in {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluated fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fux/0.2.0
+ trace: collect_build_prerequisites: resume fux/0.2.0
+ trace: collect_build_prerequisites: end fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fux^ | libfoo->{fux/1,1}}!
+ trace: collect_build_postponed (1): end {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by fux)
+ reconfigure fux/0.2.0 (dependent of libfoo)
+ config.fux.extras=true (dependent configuration)
+ reconfigure fex (dependent of fux)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-0.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = false
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ EOO
+
+ $* ?fex ?fux libfoo/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build_prerequisites: skip configured libfoo/0.1.0
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: fux/0.2.0: update to fux/1.0.0
+ trace: evaluate_dependency: fex/0.1.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval fex/0.1.0
+ trace: collect_build_prerequisites: pre-reevaluated fex/0.1.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency fux/1.0.0 of existing dependent fex/0.1.0 due to dependency fux/1.0.0
+ trace: postponed_configurations::add: create {fex^ | fux->{fex/1,1}}
+ trace: collect_drop: fex/0.1.0 package version needs to be replaced with drop
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build_prerequisites: skip configured libfoo/0.1.0
+ trace: collect_build_prerequisites: skip expected to be dropped existing dependent fex of dependency fux
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/0.1.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_drop: overwrite fex
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: fux/1.0.0: unused
+ trace: pkg_build: one of dependency evaluation decisions has changed, re-collecting from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build_prerequisites: skip configured libfoo/0.1.0
+ trace: collect_drop: overwrite fex
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: fux/0.2.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_drop: overwrite fex
+ trace: collect_drop: overwrite fux
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ drop fux/0.2.0 (unused)
+ drop fex/0.1.0 (unused)
+ update libfoo/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : require-clause-user-config
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fex: depends: fux(c)
+ #
+ # fux: depends: libfoo(c) ?
+ #
+ $* fex/0.1.0 fux/0.2.0 +{ config.fux.network=true } 2>!;
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/fux-0.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = true
+ %.*
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ config.libfoo.network = false
+ %.*
+ EOO
+
+ $* ?libfoo/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libfoo/1.0.0: update to libfoo/0.1.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of existing dependent fux/0.2.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add fux/0.2.0
+ trace: postponed_configurations::add: create {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: reeval fux/0.2.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: postponed_configurations::add: add {fux^ 1,1: libfoo} to {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent fux/0.2.0 results in {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluated fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fux/0.2.0
+ trace: collect_build_prerequisites: resume fux/0.2.0
+ trace: collect_build_prerequisites: end fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fux^ | libfoo->{fux/1,1}}!
+ trace: collect_build_postponed (1): end {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by fux)
+ reconfigure fux/0.2.0 (dependent of libfoo)
+ config.fux.network=true (user configuration)
+ config.fux.extras=true (dependent configuration)
+ reconfigure fex (dependent of fux)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.2.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/fux-0.2.0/build/config.build >>~%EOO%;
+ %.*
+ config.fux.extras = true
+ config.fux.network = true
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ %.*
+ EOO
+
+ $* ?fex ?libfoo 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libfoo/0.1.0: update to libfoo/1.0.0
+ trace: evaluate_dependency: fex/0.1.0: unused
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent fux/0.2.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add fux/0.2.0
+ trace: postponed_configurations::add: create {fux^ | libfoo->{fux/1,1}}
+ trace: collect_drop: overwrite fex
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: pre-reeval fux/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated fux/0.2.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: reeval fux/0.2.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: postponed_configurations::add: add {fux^ 1,1: libfoo} to {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent fux/0.2.0 results in {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_prerequisites: re-evaluated fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fux/0.2.0
+ trace: collect_build_prerequisites: resume fux/0.2.0
+ trace: collect_build_prerequisites: end fux/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fux^ | libfoo->{fux/1,1}}!
+ trace: collect_build_postponed (1): end {fux^ | libfoo->{fux/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (set by fux)
+ reconfigure fux/0.2.0 (dependent of libfoo)
+ config.fux.network=true (user configuration)
+ config.fux.extras=true (user configuration)
+ drop fex/0.1.0 (unused)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop fux
+ }
+ }
+
+ : premature
+ :
+ {
+ $clone_cfg;
+
+ $* fux foo fox 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/1.0.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone fox/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fox/1.0.0
+ trace: collect_build_prerequisites: resume fox/1.0.0
+ trace: collect_build_prerequisites: end fox/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo fox | libfoo->{foo/1,1 fox/1,1}}!
+ trace: collect_build_postponed (1): end {foo fox | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop fux foo fox
+ }
+
+ : bogus-postponement
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fux: depends: libfoo
+ #
+ # foo: depends: libfoo(c)
+ #
+ # fix: depends: foo(c)
+ #
+ $* fux foo fix 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (foo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: pkg_build: dep-postpone user-specified foo
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fix/1.0.0
+ trace: collect_build_prerequisites: resume fix/1.0.0
+ trace: collect_build_prerequisites: end fix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fix | foo->{fix/1,1}}!
+ trace: collect_build_postponed (2): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): end {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo, fux)
+ config.libfoo.extras=true (set by foo)
+ new fux/1.0.0
+ new foo/1.0.0
+ config.foo.extras=true (set by fix)
+ config.foo.libfoo_extras=true (set by foo)
+ new fix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop fux foo fix
+ }
+ }
+
+ : existing
+ :
+ {
+ +$clone_cfg
+
+ : dependency
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo --verbose 1 2>!;
+
+ $* foo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ reconfigure/update libfoo/1.0.0 (required by foo)
+ config.libfoo.extras=true (set by foo)
+ new foo/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo libfoo
+ }
+
+ : dependent-single-pos
+ :
+ {
+ +$clone_cfg
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # foo/1.0.0: depends: libfoo(c)
+ #
+ $* foo +{ config.foo.extras=true } 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo)
+ config.libfoo.extras=true (set by foo)
+ new foo/1.0.0
+ config.foo.extras=true (user configuration)
+ config.foo.libfoo_extras=true (set by foo)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ cat cfg/foo-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.foo.extras = true
+ config.foo.libfoo_extras = true
+ %.*
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ %.*
+ EOO
+
+ # Downgrade the dependency.
+ #
+ $* ?libfoo/0.1.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libfoo/1.0.0: update to libfoo/0.1.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of existing dependent foo/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add foo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ %.*
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo^ | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by foo)
+ reconfigure foo/1.0.0 (dependent of libfoo)
+ %.*
+ disfigured foo/1.0.0
+ %.*
+ disfigured libfoo/1.0.0
+ %.*
+ fetched libfoo/0.1.0
+ %.*
+ unpacked libfoo/0.1.0
+ %.*
+ configured libfoo/0.1.0
+ %.*
+ configured foo/1.0.0
+ %.*
+ updated foo/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/foo-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.foo.extras = true
+ config.foo.libfoo_extras = true
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ %.*
+ EOO
+
+ # Reconfigure the dependency and hold.
+ #
+ $* libfoo/0.1.0 +{ config.libfoo.extras=true } 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libfoo/0.1.0
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of existing dependent foo/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add foo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo^ | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ reconfigure/update libfoo/0.1.0
+ config.libfoo.extras=true (user configuration)
+ reconfigure foo/1.0.0 (dependent of libfoo)
+ %.*
+ disfigured foo/1.0.0
+ %.*
+ disfigured libfoo/0.1.0
+ %.*
+ configured libfoo/0.1.0
+ %.*
+ configured foo/1.0.0
+ %.*
+ updated libfoo/0.1.0
+ %.*
+ updated foo/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/foo-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.foo.extras = true
+ config.foo.libfoo_extras = true
+ %.*
+ EOO
+
+ cat cfg/libfoo-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ %.*
+ EOO
+
+ # Upgrade the dependency and unhold existing dependent.
+ #
+ $* libfoo ?foo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent foo/1.0.0 due to dependency libfoo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ %.*
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo^ | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo^ | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: foo/1.0.0: unused
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent foo/1.0.0 due to dependency libfoo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_drop: foo/1.0.0 package version needs to be replaced with drop
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: skip expected to be dropped existing dependent foo of dependency libfoo
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_drop: overwrite foo
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (user configuration)
+ drop foo/1.0.0 (unused)
+ %.*
+ disfigured foo/1.0.0
+ %.*
+ disfigured libfoo/0.1.0
+ %.*
+ fetched libfoo/1.0.0
+ %.*
+ unpacked libfoo/1.0.0
+ %.*
+ purged foo/1.0.0
+ %.*
+ configured libfoo/1.0.0
+ %.*
+ updated libfoo/1.0.0
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ EOO
+
+ cat cfg/libfoo-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libfoo.extras = true
+ %.*
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : multiple-dependents
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # foo/1.0.0: depends: libfoo(c)
+ # fox/1.0.0: depends: libfoo(c)
+ #
+ $* libfoo/0.1.0 foo/1.0.0 fox/1.0.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ !foo configured !1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ !fox configured !1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $* libfoo 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval fox/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated fox/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent foo/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent fox/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add fox/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval fox/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated fox/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_prerequisites: reeval fox/1.0.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: postponed_configurations::add: add {fox^ 1,1: libfoo} to {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent fox/1.0.0 results in {foo^ fox^ | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_prerequisites: re-evaluated fox/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo^ fox^ | libfoo->{foo/1,1 fox/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fox/1.0.0
+ trace: collect_build_prerequisites: resume fox/1.0.0
+ trace: collect_build_prerequisites: end fox/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo^ fox^ | libfoo->{foo/1,1 fox/1,1}}!
+ trace: collect_build_postponed (1): end {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (set by foo)
+ reconfigure fox/1.0.0 (dependent of libfoo)
+ config.fox.libfoo_extras=true (set by fox)
+ reconfigure foo/1.0.0 (dependent of libfoo)
+ config.foo.libfoo_extras=true (set by foo)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !foo configured !1.0.0
+ !libfoo configured 1.0.0
+ !fox configured !1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libfoo foo fox
+ }
+
+ : postpone-existing-dependency
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bus: depends: foo(c)
+ #
+ # fix: depends: foo == 0.1.0
+ #
+ # biz/0.1.0: depends: libbiz == 0.1.0
+ #
+ # libbiz/1.0.0: depends: libbar
+ # libbiz/0.1.0:
+ #
+ # foo: depends: libfoo(c)
+ #
+ $* bus/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bus configured !0.1.0 available 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* fix/0.1.0 libbiz biz/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add fix/0.1.0
+ trace: collect_build: add libbiz/1.0.0
+ trace: collect_build: add biz/0.1.0
+ trace: collect_build_prerequisites: begin fix/0.1.0
+ trace: collect_build: add foo/0.1.0
+ info: package fix dependency on (foo == 0.1.0) is forcing downgrade of foo/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency foo/0.1.0 of dependent fix/0.1.0
+ trace: collect_build_prerequisites: pre-reeval bus/0.1.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/0.1.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/0.1.0 of existing dependent bus/0.1.0 due to dependency foo/0.1.0
+ trace: collect_build: add bus/0.1.0
+ trace: postponed_configurations::add: create {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: end fix/0.1.0
+ trace: collect_build_prerequisites: begin libbiz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbiz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end libbiz/1.0.0
+ trace: collect_build_prerequisites: begin biz/0.1.0
+ trace: collect_build: pick libbiz/0.1.0 over libbiz/1.0.0
+ trace: collect_build: libbiz/1.0.0 package version needs to be replaced with libbiz/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add fix/0.1.0
+ trace: collect_build: apply version replacement for libbiz/1.0.0
+ trace: collect_build: replacement: libbiz/0.1.0
+ trace: collect_build: add libbiz/0.1.0
+ trace: collect_build: add biz/0.1.0
+ trace: collect_build_prerequisites: begin fix/0.1.0
+ trace: collect_build: add foo/0.1.0
+ info: package fix dependency on (foo == 0.1.0) is forcing downgrade of foo/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency foo/0.1.0 of dependent fix/0.1.0
+ trace: collect_build_prerequisites: pre-reeval bus/0.1.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/0.1.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/0.1.0 of existing dependent bus/0.1.0 due to dependency foo/0.1.0
+ trace: collect_build: add bus/0.1.0
+ trace: postponed_configurations::add: create {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: end fix/0.1.0
+ trace: collect_build_prerequisites: begin libbiz/0.1.0
+ trace: collect_build_prerequisites: end libbiz/0.1.0
+ trace: collect_build_prerequisites: begin biz/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbiz/0.1.0 of dependent biz/0.1.0
+ trace: collect_build_prerequisites: end biz/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/0.1.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/0.1.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: reeval bus/0.1.0
+ trace: collect_build: pick foo/0.1.0 over foo/1.0.0
+ trace: postponed_configurations::add: add {bus^ 1,1: foo} to {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bus/0.1.0 results in {bus^ | foo->{bus/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bus/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bus^ | foo->{bus/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/0.1.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/0.1.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bus/0.1.0
+ trace: collect_build_prerequisites: resume bus/0.1.0
+ trace: collect_build_prerequisites: end bus/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bus^ | foo->{bus/1,1}}!
+ trace: collect_build_postponed (2): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent foo of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent foo/0.1.0
+ trace: collect_build_prerequisites: resume foo/0.1.0
+ trace: collect_build_prerequisites: end foo/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): end {bus^ | foo->{bus/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade foo/0.1.0 (required by bus, fix)
+ config.foo.extras=true (set by bus)
+ reconfigure bus/0.1.0 (dependent of foo)
+ config.bus.foo_extras=true (set by bus)
+ new fix/0.1.0
+ new libbiz/0.1.0
+ new biz/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bus configured !0.1.0 available 1.0.0
+ foo configured 0.1.0 available 1.0.0 0.2.0
+ libfoo configured 1.0.0
+ !fix configured !0.1.0 available 1.0.0
+ foo configured 0.1.0 available 1.0.0 0.2.0
+ libfoo configured 1.0.0
+ !libbiz configured 0.1.0 available 1.0.0
+ !biz configured !0.1.0 available 1.0.0
+ !libbiz configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop bus fix libbiz biz
+ }
+
+ : postpone-existing
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # fix/1.0.0: depends: foo(c)
+ # foo/1.0.0: depends: libfoo(c)
+ #
+ # fix/0.1.0: depends: foo == 0.1.0
+ # foo/0.1.0: depends: libfoo(c)
+ #
+ $* fix --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !fix configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* libfoo/0.1.0 fix/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add fix/0.1.0
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of existing dependent foo/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add foo/1.0.0
+ trace: postponed_configurations::add: create {foo^ | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: begin fix/0.1.0
+ trace: collect_build: pick foo/0.1.0 over foo/1.0.0
+ trace: collect_build: foo/1.0.0 package version needs to be replaced with foo/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add fix/0.1.0
+ trace: collect_build_prerequisites: skip expected to be built existing dependent foo of dependency libfoo
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_prerequisites: begin fix/0.1.0
+ trace: collect_build: apply version replacement for foo/0.1.0
+ trace: collect_build: replacement: foo/0.1.0
+ trace: collect_build: add foo/0.1.0
+ info: package fix dependency on (foo == 0.1.0) is forcing downgrade of foo/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency foo/0.1.0 of dependent fix/0.1.0
+ trace: collect_build_prerequisites: skip being built existing dependent fix of dependency foo
+ trace: collect_build_prerequisites: begin foo/0.1.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/0.1.0 of dependent foo/0.1.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add fix/0.1.0
+ trace: pkg_build: dep-postpone user-specified libfoo
+ trace: collect_build_prerequisites: begin fix/0.1.0
+ trace: collect_build: apply version replacement for foo/0.1.0
+ trace: collect_build: replacement: foo/0.1.0
+ trace: collect_build: add foo/0.1.0
+ info: package fix dependency on (foo == 0.1.0) is forcing downgrade of foo/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency foo/0.1.0 of dependent fix/0.1.0
+ trace: collect_build_prerequisites: skip being built existing dependent fix of dependency foo
+ trace: collect_build_prerequisites: begin foo/0.1.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of dependent foo/0.1.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/0.1.0
+ trace: collect_build_prerequisites: end fix/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent foo of dependency libfoo
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/0.1.0
+ trace: collect_build_prerequisites: resume foo/0.1.0
+ trace: collect_build_prerequisites: end foo/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by foo)
+ downgrade foo/0.1.0 (required by fix)
+ config.foo.extras=true (dependent configuration)
+ downgrade fix/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ !fix configured !0.1.0 available 1.0.0
+ foo configured 0.1.0 available 1.0.0 0.2.0
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop fix libfoo --drop-dependent
+ }
+ }
+
+ : dependent-mult-pos
+ :
+ {
+ +$clone_cfg
+
+ : non-negotiated
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ $* tex --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* libfoo/0.1.0 libbar/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/0.1.0 of existing dependent tex/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: pkg_build: dep-postpone user-specified libbar since already in cluster {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (2): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (1): end {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by tex)
+ downgrade libbar/0.1.0
+ config.libbar.extras=true (set by tex)
+ reconfigure tex/1.0.0 (dependent of libbar, libfoo)
+ config.tex.libfoo_extras=true (set by tex)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop tex libfoo libbar
+ }
+
+ : negotiated
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # bar: depends: libbar == 0.1.0 (c)
+ #
+ $* tex --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* libfoo/0.1.0 bar/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add bar/0.1.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: begin bar/0.1.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: collect_build: libbar/1.0.0 package version needs to be replaced with libbar/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/0.1.0
+ trace: collect_build: add bar/0.1.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: apply version replacement for libbar/1.0.0
+ trace: collect_build: replacement: libbar/0.1.0
+ trace: collect_build: add libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/0.1.0 of existing dependent tex/1.0.0 due to dependency libfoo/0.1.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: begin bar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/0.1.0 of dependent bar/0.1.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone bar/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/0.1.0
+ trace: collect_build_prerequisites: resume bar/0.1.0
+ trace: collect_build_prerequisites: end bar/0.1.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar tex^ | libbar->{bar/1,1 tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (2): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (1): end {bar tex^ | libbar->{bar/1,1 tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libfoo/0.1.0
+ config.libfoo.extras=true (set by tex)
+ downgrade libbar/0.1.0 (required by bar, tex)
+ config.libbar.extras=true (set by bar)
+ reconfigure tex/1.0.0 (dependent of libbar, libfoo)
+ config.tex.libfoo_extras=true (set by tex)
+ new bar/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ !libfoo configured !0.1.0 available 1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop tex libfoo bar
+ }
+
+ : up-negotiate
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # baz: depends: {libbar libfoo} == 0.1.0 (c)
+ #
+ $* tex --verbose 1 2>!;
+
+ $* baz/0.1.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add baz/0.1.0
+ trace: collect_build_prerequisites: begin baz/0.1.0
+ %.*
+ trace: collect_build: add libbar/0.1.0
+ info: package baz dependency on (libbar == 0.1.0) is forcing downgrade of libbar/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/0.1.0 of dependent baz/0.1.0
+ trace: collect_build: add libfoo/0.1.0
+ info: package baz dependency on (libfoo == 0.1.0) is forcing downgrade of libfoo/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of dependent baz/0.1.0
+ trace: postponed_configurations::add: create {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ %.*
+ trace: collect_build: pick libbar/0.1.0 over libbar/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {baz tex^ | libbar->{baz/1,1 tex/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {baz tex^ | libbar->{baz/1,1 tex/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/0.1.0
+ trace: collect_build_prerequisites: end libbar/0.1.0
+ trace: collect_build_prerequisites: begin libfoo/0.1.0
+ trace: collect_build_prerequisites: end libfoo/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/0.1.0
+ trace: collect_build_prerequisites: resume baz/0.1.0
+ trace: collect_build_prerequisites: end baz/0.1.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: pick libfoo/0.1.0 over libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/0.1.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {baz tex^ | libbar->{baz/1,1 tex/1,1} libfoo->{baz/1,1}}?
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/0.1.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {baz tex^ | libbar->{baz/1,1 tex/1,1} libfoo->{baz/1,1 tex/2,1}}!
+ trace: collect_build_postponed (1): end {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libfoo configured 0.1.0 available 1.0.0
+ !baz configured !0.1.0 available 1.0.0
+ libbar configured 0.1.0 available 1.0.0
+ libfoo configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop tex baz
+ }
+
+ : replace-reeval
+ :
+ {
+ +$clone_cfg
+
+ : initial-collection
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # baz: depends: {libbar libfoo} (c)
+ #
+ $* bax baz --verbose 1 2>!;
+
+ $* libbox/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent bax/1.0.0 due to dependency libbox/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}?
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{bax/2,1 baz/1,1} libbox->{bax/2,1}}?, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bax^ | libfoo->{bax/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}?
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: begin libbox/0.1.0
+ trace: collect_build_prerequisites: end libbox/0.1.0
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{bax/2,1 baz/1,1} libbox->{bax/2,1}}!
+ trace: collect_build_postponed (1): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libbox/0.1.0
+ config.libbox.extras=true (set by bax)
+ reconfigure bax/1.0.0 (dependent of libbox, libfoo)
+ config.bax.libfoo_extras=true (set by bax)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbox configured !0.1.0 available 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ !libbox configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop bax baz libbox
+ }
+
+ : collect-postponed
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # baz: depends: {libbar libfoo} (c)
+ #
+ # box: depends: libbox == 0.1.0 (c)
+ #
+ $* bax baz --verbose 1 2>!;
+
+ $* box/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add box/0.1.0
+ trace: collect_build_prerequisites: begin box/0.1.0
+ trace: collect_build: add libbox/0.1.0
+ info: package box dependency on (libbox == 0.1.0) is forcing downgrade of libbox/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent box/0.1.0
+ trace: postponed_configurations::add: create {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/0.1.0
+ trace: collect_build_prerequisites: end libbox/0.1.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.1.0
+ trace: collect_build_prerequisites: resume box/0.1.0
+ trace: collect_build_prerequisites: end box/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: postponed_configurations::add: merge {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}? into {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax baz^ box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 baz/1,1} libfoo->{bax/1,1 baz/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax baz^ box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 baz/1,1} libfoo->{bax/1,1 baz/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {box | libbox->{box/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ box | libbox->{box/1,1} libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ box | libbox->{box/1,1} libfoo->{bax/1,1}}
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ box | libbox->{box/1,1} libfoo->{bax/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ box | libbox->{box/1,1} libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ baz^ box | libbox->{box/1,1} libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/0.1.0
+ trace: collect_build_prerequisites: end libbox/0.1.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ baz^ box | libbox->{box/1,1} libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/0.1.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.1.0
+ trace: collect_build_prerequisites: resume box/0.1.0
+ trace: collect_build_prerequisites: end box/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ baz^ box | libbox->{bax/2,1 box/1,1} libfoo->{bax/1,1 baz/1,1} libbar->{bax/2,1 baz/1,1}}!
+ trace: collect_build_postponed (1): end {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade libbox/0.1.0 (required by bax, box)
+ config.libbox.extras=true (set by box)
+ reconfigure bax/1.0.0 (dependent of libbox)
+ config.bax.libfoo_extras=true (set by bax)
+ new box/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !box configured !0.1.0 available 1.0.0 0.2.0
+ libbox configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop bax baz box
+ }
+
+ : bogus-harmless
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # baz: depends: {libbar libfoo} (c)
+ #
+ $* bax baz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* libbox/0.1.0 ?baz 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent bax/1.0.0 due to dependency libbox/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}?
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{bax/2,1 baz/1,1} libbox->{bax/2,1}}?, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bax^ | libfoo->{bax/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval baz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated baz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: reeval baz/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {baz^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent baz/1.0.0 results in {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated baz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent baz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{baz/1,1}}?
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: begin libbox/0.1.0
+ trace: collect_build_prerequisites: end libbox/0.1.0
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ baz^ | libfoo->{bax/1,1 baz/1,1} libbar->{bax/2,1 baz/1,1} libbox->{bax/2,1}}!
+ trace: collect_build_postponed (1): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: baz/1.0.0: unused
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent bax/1.0.0 due to dependency libbox/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_drop: overwrite baz
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip being dropped existing dependent baz of dependency libfoo
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: pick libbox/0.1.0 over libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_postponed (2): begin {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being dropped existing dependent baz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/0.1.0
+ trace: collect_build_prerequisites: end libbox/0.1.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax | libbox->{bax/2,1} libbar->{bax/2,1}}!
+ trace: collect_build_postponed (2): end {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (1): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ drop baz/1.0.0 (unused)
+ downgrade libbox/0.1.0
+ config.libbox.extras=true (set by bax)
+ reconfigure bax/1.0.0 (dependent of libbox, libfoo)
+ config.bax.libfoo_extras=true (set by bax)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbox configured !0.1.0 available 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ !libbox configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop bax libbox
+ }
+ }
+ }
+ }
+
+ : postponed-collection
+ :
+ {
+ +$clone_cfg
+
+ : backtrace
+ :
+ {
+ $clone_cfg;
+
+ $* foo bar box 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: create {bar | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_prerequisites: begin box/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent box/1.0.0 since max index is reached: 0
+ info: dependency alternative: {libbar libfoo}
+ {
+ require
+ {
+ config.libbar.extras = true
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone box/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bar | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): index 1 collect alt-postponed box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent box/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent box/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {box 1,1: libbar libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: postponed_configurations::add: merge {bar | libbar->{bar/1,1}}! into {box foo | libfoo->{box/1,1 foo/1,1} libbar->{box/1,1}}!
+ %.*
+ trace: collect_build_prerequisites: cfg-postponing dependent box/1.0.0 involves (being) negotiated configurations and results in {bar box foo | libfoo->{box/1,1 foo/1,1} libbar->{bar/1,1 box/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {foo | libfoo->{foo/1,1}} failed due to dependent box, refining configuration
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bar | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): index 1 collect alt-postponed box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent box/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent box/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {box 1,1: libbar libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: postponed_configurations::add: merge {bar | libbar->{bar/1,1}}! into {box foo | libfoo->{box/1,1 foo/1,1} libbar->{box/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent box/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent box/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent box/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (2): end {bar | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !box configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo bar box
+ }
+
+ : premature
+ :
+ {
+ $clone_cfg;
+
+ $* fux fix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fix/1.0.0
+ trace: collect_build_prerequisites: resume fix/1.0.0
+ trace: collect_build_prerequisites: end fix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fix | foo->{fix/1,1}}!
+ trace: collect_build_postponed (2): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): end {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop fux fix
+ }
+
+ : double-premature
+ :
+ {
+ $clone_cfg;
+
+ $* fux bex fix buz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add bex/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build: add buz/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin bex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent bex/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end bex/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_prerequisites: begin buz/1.0.0
+ %.*
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buz/1.0.0
+ trace: postponed_configurations::add: create {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: postpone buz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add bex/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build: add buz/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin bex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent bex/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end bex/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_prerequisites: begin buz/1.0.0
+ %.*
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buz/1.0.0
+ trace: postponed_configurations::add: create {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: postpone buz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fix/1.0.0
+ trace: collect_build_prerequisites: resume fix/1.0.0
+ trace: collect_build_prerequisites: end fix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fix | foo->{fix/1,1}}!
+ trace: collect_build_postponed (2): begin {buz | bux->{buz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {buz | bux->{buz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libbar), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add bex/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build: add buz/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin bex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libbar/1.0.0 of dependent bex/1.0.0
+ trace: collect_build_prerequisites: end bex/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_prerequisites: begin buz/1.0.0
+ %.*
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buz/1.0.0
+ trace: postponed_configurations::add: create {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: postpone buz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fix/1.0.0
+ trace: collect_build_prerequisites: resume fix/1.0.0
+ trace: collect_build_prerequisites: end fix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fix | foo->{fix/1,1}}!
+ trace: collect_build_postponed (2): begin {buz | bux->{buz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {buz | bux->{buz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0
+ trace: postponed_configurations::add: create {bux | libbar->{bux/1,1}}
+ trace: collect_build_prerequisites: postpone bux/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent buz/1.0.0
+ trace: collect_build_prerequisites: resume buz/1.0.0
+ trace: collect_build_prerequisites: end buz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {buz | bux->{buz/1,1}}!
+ trace: collect_build_postponed (3): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (4): begin {bux | libbar->{bux/1,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {bux | libbar->{bux/1,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bux/1.0.0
+ trace: collect_build_prerequisites: resume bux/1.0.0
+ trace: collect_build_prerequisites: end bux/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {bux | libbar->{bux/1,1}}!
+ trace: collect_build_postponed (4): end {bux | libbar->{bux/1,1}}
+ trace: collect_build_postponed (3): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): end {buz | bux->{buz/1,1}}
+ trace: collect_build_postponed (1): end {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bex configured 1.0.0
+ libbar configured 1.0.0
+ !buz configured 1.0.0
+ bux configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ $pkg_drop fux bex fix buz
+ }
+
+ : premature-in-cluster
+ :
+ : Test recollecting from scratch if the prematurely collected dependency
+ : belongs (as a dependency) to a non-negotiated cluster.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bas: depends: libbar(c)
+ # depends: bus(c)
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # bix: depends: {libbar bar} (c)
+ # depends: bux
+ #
+ # buz: depends: bux(c)
+ #
+ # buc: depends: libfoo(c)
+ # depends: bux(c)
+ #
+ # bus: depends: libbaz(c)
+ # depends: foo(c)
+ #
+ # bux: depends: libbar(c)
+ #
+ # bar: depends: libbar(c)
+ #
+ # foo: depends: libfoo(c)
+ #
+ $* bas bax bix buz buc 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build: add bix/1.0.0
+ trace: collect_build: add buz/1.0.0
+ trace: collect_build: add buc/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: begin bix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bix/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent bix/1.0.0
+ trace: postponed_configurations::add: add {bix 1,1: libbar bar} to {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: postpone bix/1.0.0
+ trace: collect_build_prerequisites: begin buz/1.0.0
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buz/1.0.0
+ trace: postponed_configurations::add: create {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: postpone buz/1.0.0
+ trace: collect_build_prerequisites: begin buc/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent buc/1.0.0
+ trace: postponed_configurations::add: add {buc 1,1: libfoo} to {bax | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: postpone buc/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}?
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bix/1.0.0
+ trace: collect_build_prerequisites: resume bix/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency bux/1.0.0 of dependent bix/1.0.0 since already in cluster {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: end bix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar bas bix | libbar->{bar/1,1 bas/1,1 bix/1,1} bar->{bix/1,1}}!
+ trace: collect_build_postponed (2): begin {bax buc | libfoo->{bax/1,1 buc/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax buc | libfoo->{bax/1,1 buc/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bar bas bix | libbar->{bar/1,1 bas/1,1 bix/1,1} bar->{bix/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bar bas bax bix | libbar->{bar/1,1 bas/1,1 bax/2,1 bix/1,1} bar->{bix/1,1} libbox->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (1): begin {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}?
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bix/1.0.0
+ trace: collect_build_prerequisites: resume bix/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency bux/1.0.0 of dependent bix/1.0.0 since already in cluster {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: end bix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar bas bix | libbar->{bar/1,1 bas/1,1 bix/1,1} bar->{bix/1,1}}!
+ trace: collect_build_postponed (2): begin {bax buc | libfoo->{bax/1,1 buc/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax buc | libfoo->{bax/1,1 buc/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bar bas bix | libbar->{bar/1,1 bas/1,1 bix/1,1} bar->{bix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent buc/1.0.0
+ trace: collect_build_prerequisites: resume buc/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buc/1.0.0
+ trace: postponed_configurations::add: add {buc 2,1: bux} to {buz | bux->{buz/1,1}}
+ trace: collect_build_prerequisites: postpone buc/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax buc | libfoo->{bax/1,1 buc/1,1}}!
+ trace: collect_build_postponed (3): begin {buc buz | bux->{buc/2,1 buz/1,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {buc buz | bux->{buc/2,1 buz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0
+ trace: postponed_configurations::add: add {bux 1,1: libbar} to {bar bas bax bix | libbar->{bar/1,1 bas/1,1 bax/2,1 bix/1,1} bar->{bix/1,1} libbox->{bax/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bux/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bux/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bux/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent buc/1.0.0
+ trace: collect_build_prerequisites: resume buc/1.0.0
+ trace: collect_build_prerequisites: end buc/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent buz/1.0.0
+ trace: collect_build_prerequisites: resume buz/1.0.0
+ trace: collect_build_prerequisites: end buz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {buc buz | bux->{buc/2,1 buz/1,1}}!
+ trace: collect_build_postponed (4): begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | libbaz->{bus/1,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: end bas/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {bas | bus->{bas/2,1}}!
+ trace: collect_build_postponed (5): begin {bus | libbaz->{bus/1,1}}
+ trace: collect_build_postponed (5): cfg-negotiate begin {bus | libbaz->{bus/1,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | foo->{bus/2,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {bus | libbaz->{bus/1,1}}!
+ trace: collect_build_postponed (6): begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (6): cfg-negotiate begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bax buc | libfoo->{bax/1,1 buc/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent foo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent foo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build_prerequisites: end bus/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {bus | foo->{bus/2,1}}!
+ trace: collect_build_postponed (6): end {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (5): end {bus | libbaz->{bus/1,1}}
+ trace: collect_build_postponed (4): end {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): end {buc buz | bux->{buc/2,1 buz/1,1}}
+ trace: collect_build_postponed (2): end {bax buc | libfoo->{bax/1,1 buc/1,1}}
+ trace: collect_build_postponed (1): end {bas bix | libbar->{bas/1,1 bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libbaz/1.0.0 (required by bus)
+ config.libbaz.extras=true (set by bus)
+ new libbox/1.0.0 (required by bax)
+ config.libbox.extras=true (set by bax)
+ new libfoo/1.0.0 (required by bax, buc, foo)
+ config.libfoo.extras=true (set by bax)
+ new foo/1.0.0 (required by bus)
+ config.foo.extras=true (set by bus)
+ config.foo.libfoo_extras=true (set by foo)
+ new bus/1.0.0 (required by bas)
+ config.bus.extras=true (set by bas)
+ new libbar/1.0.0 (required by bar, bas, bax, bix, bux)
+ config.libbar.extras=true (set by bas)
+ new bas/1.0.0
+ new bax/1.0.0
+ config.bax.libfoo_extras=true (set by bax)
+ new bar/1.0.0 (required by bix)
+ config.bar.extras=true (set by bix)
+ new bux/1.0.0 (required by bix, buc, buz)
+ config.bux.extras=true (set by buc)
+ new bix/1.0.0
+ new buz/1.0.0
+ new buc/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bas configured 1.0.0
+ bus configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !bix configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ bux configured 1.0.0
+ libbar configured 1.0.0
+ libbar configured 1.0.0
+ !buz configured 1.0.0
+ bux configured 1.0.0
+ libbar configured 1.0.0
+ !buc configured 1.0.0
+ bux configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop bas bax bix buz buc
+ }
+
+ : up-negotiate-dependency
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bar: depends: libbar(c)
+ #
+ # bux: depends: libbar(c)
+ #
+ # bix: depends: {libbar bar} (c)
+ # depends: bux
+ #
+ $* bix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add bix/1.0.0
+ trace: collect_build_prerequisites: begin bix/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bix/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent bix/1.0.0
+ trace: postponed_configurations::add: create {bix | libbar->{bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_prerequisites: postpone bix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bix | libbar->{bix/1,1} bar->{bix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {bix | libbar->{bix/1,1} bar->{bix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bix | libbar->{bix/1,1} bar->{bix/1,1}}?
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bix/1.0.0
+ trace: collect_build_prerequisites: resume bix/1.0.0
+ %.*
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bux/1.0.0 of dependent bix/1.0.0
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0
+ trace: postponed_configurations::add: add {bux 1,1: libbar} to {bar bix | libbar->{bar/1,1 bix/1,1} bar->{bix/1,1}}?
+ %.*
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bux/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bux/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bux/1.0.0
+ trace: collect_build_prerequisites: end bix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar bix bux | libbar->{bar/1,1 bix/1,1 bux/1,1} bar->{bix/1,1}}!
+ trace: collect_build_postponed (1): end {bix | libbar->{bix/1,1} bar->{bix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bix configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ bux configured 1.0.0
+ libbar configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ $pkg_drop bix
+ }
+
+ : postponed-alts
+ :
+ {
+ +$clone_cfg
+
+ : with-premature
+ :
+ {
+ $clone_cfg;
+
+ $* fux boo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 0
+ info: dependency alternative: libbar
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): index 1 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ require
+ {
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (0): index 2 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent boo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 0
+ info: dependency alternative: libbar
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): index 1 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ require
+ {
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (0): index 2 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent boo/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {boo | libfoo->{boo/1,2}}
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (1): begin {boo | libfoo->{boo/1,2}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {boo | libfoo->{boo/1,2}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ trace: collect_build_prerequisites: end boo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {boo | libfoo->{boo/1,2}}!
+ trace: collect_build_postponed (1): end {boo | libfoo->{boo/1,2}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !boo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop fux boo
+ }
+
+ : with-premature-complex
+ :
+ {
+ $clone_cfg;
+
+ $* fux bex fix biz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add bex/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build: add biz/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin bex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent bex/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end bex/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_prerequisites: begin biz/1.0.0
+ %.*
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency boo/1.0.0 of dependent biz/1.0.0
+ trace: postponed_configurations::add: create {biz | boo->{biz/1,1}}
+ trace: collect_build_prerequisites: postpone biz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fux/1.0.0
+ trace: collect_build: add bex/1.0.0
+ trace: collect_build: add fix/1.0.0
+ trace: collect_build: add biz/1.0.0
+ trace: collect_build_prerequisites: begin fux/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent fux/1.0.0
+ trace: collect_build_prerequisites: end fux/1.0.0
+ trace: collect_build_prerequisites: begin bex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent bex/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end bex/1.0.0
+ trace: collect_build_prerequisites: begin fix/1.0.0
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent fix/1.0.0
+ trace: postponed_configurations::add: create {fix | foo->{fix/1,1}}
+ trace: collect_build_prerequisites: postpone fix/1.0.0
+ trace: collect_build_prerequisites: begin biz/1.0.0
+ %.*
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency boo/1.0.0 of dependent biz/1.0.0
+ trace: postponed_configurations::add: create {biz | boo->{biz/1,1}}
+ trace: collect_build_prerequisites: postpone biz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {fix | foo->{fix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent fix/1.0.0
+ trace: collect_build_prerequisites: resume fix/1.0.0
+ trace: collect_build_prerequisites: end fix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {fix | foo->{fix/1,1}}!
+ trace: collect_build_postponed (2): begin {biz | boo->{biz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {biz | boo->{biz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 0
+ info: dependency alternative: libbar
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent biz/1.0.0
+ trace: collect_build_prerequisites: resume biz/1.0.0
+ trace: collect_build_prerequisites: end biz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {biz | boo->{biz/1,1}}!
+ trace: collect_build_postponed (3): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (3): index 1 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent boo/1.0.0
+ trace: collect_build_prerequisites: end boo/1.0.0
+ trace: collect_build_postponed (3): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (2): end {biz | boo->{biz/1,1}}
+ trace: collect_build_postponed (1): end {fix | foo->{fix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bex configured 1.0.0
+ libbar configured 1.0.0
+ !biz configured 1.0.0
+ boo configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ $pkg_drop fux bex fix biz
+ }
+
+ : up-negotiate
+ :
+ {
+ $clone_cfg;
+
+ $* foo boo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 0
+ info: dependency alternative: libbar
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ require
+ {
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (1): index 2 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent boo/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {boo 1,2: libfoo} to {foo | libfoo->{foo/1,1}}!
+ %.*
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent boo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent boo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end boo/1.0.0
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !boo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo boo
+ }
+
+ : after-negotiation
+ :
+ {
+ $clone_cfg;
+
+ $* foo biz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add biz/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin biz/1.0.0
+ %.*
+ trace: collect_build: add boo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency boo/1.0.0 of dependent biz/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {biz | boo->{biz/1,1}}
+ trace: collect_build_prerequisites: postpone biz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): begin {biz | boo->{biz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {biz | boo->{biz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 0
+ info: dependency alternative: libbar
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent biz/1.0.0
+ trace: collect_build_prerequisites: resume biz/1.0.0
+ trace: collect_build_prerequisites: end biz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {biz | boo->{biz/1,1}}!
+ trace: collect_build_postponed (2): index 1 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent boo/1.0.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ require
+ {
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone boo/1.0.0
+ trace: collect_build_postponed (2): index 2 collect alt-postponed boo/1.0.0
+ trace: collect_build_prerequisites: resume boo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent boo/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {boo 1,2: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent boo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent boo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end boo/1.0.0
+ trace: collect_build_postponed (2): end {biz | boo->{biz/1,1}}
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !biz configured 1.0.0
+ boo configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo biz
+ }
+ }
+
+ : bogus-postponement
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tax: depends: libbar(c)
+ # depends: libfoo
+ #
+ # toz/0.1.0:
+ #
+ # toz/1.0.0: depends: libbaz(c)
+ # depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar(c)
+ #
+ $* tax toz tez 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add toz/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin toz/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: create {toz | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libbaz->{toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {toz | libbaz->{toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/1.0.0
+ trace: collect_build_prerequisites: resume toz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent toz/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add toz/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin toz/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: create {toz | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libbaz->{toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {toz | libbaz->{toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/1.0.0
+ trace: collect_build_prerequisites: resume toz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/2,1}}
+ trace: collect_build_prerequisites: postpone toz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {toz | libbaz->{toz/1,1}}!
+ trace: collect_build_postponed (3): begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build: pick toz/0.1.0 over toz/1.0.0
+ trace: collect_build: toz/1.0.0 package version needs to be replaced with toz/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: apply version replacement for toz/1.0.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (toz), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: apply version replacement for toz/1.0.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: pkg_build: dep-postpone user-specified toz
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {tez | toz->{tez/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tax | libbar->{tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ %.*
+ trace: collect_build_postponed (3): erase bogus postponement libfoo
+ trace: collect_build_postponed (3): bogus postponements erased, throwing
+ trace: pkg_build: collection failed due to bogus dependency collection postponement cancellation, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: apply version replacement for toz/1.0.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: pkg_build: dep-postpone user-specified toz
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {tez | toz->{tez/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tax | libbar->{tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tax | libbar->{tax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ $pkg_drop tax toz tez
+ }
+
+ : unconstrain-deps
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tvz: depends: toz == 0.2.0 (c)
+ #
+ # toz/0.2.0: depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar(c)
+ #
+ $* tvz 2>!;
+
+ $pkg_status -r >>EOO;
+ !tvz configured 1.0.0
+ toz configured 0.2.0 available 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* tvz +{ config.tvz.extras=true } tez 2>&1 != 0 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {toz | libfoo->{toz/1,1}}!
+ trace: collect_build_postponed (4): begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (4): cfg-negotiate begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tez toz | libbar->{tez/3,1 toz/2,1}}!
+ trace: collect_build_postponed (4): end {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (3): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version tez/1.0.0 with 0.1.0 by adding constraint 'tez' -> 'tez == 0.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build: add tez/0.1.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_prerequisites: begin tez/0.1.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libbar->{toz/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {toz | libfoo->{toz/1,1}}!
+ trace: collect_build_postponed (4): begin {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (4): cfg-negotiate begin {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (4): cfg-negotiate end {toz | libbar->{toz/2,1}}!
+ trace: collect_build_postponed (4): end {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (3): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version tez/0.1.0 is denied since it is specified on command line as 'tez == 0.1.0'
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version tvz/1.0.0 with 0.1.0 by adding constraint 'tvz' -> 'tvz == 0.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build: add tez/0.1.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_prerequisites: begin tez/0.1.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {bax toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent toz of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent toz of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version tez/0.1.0 is denied since it is specified on command line as 'tez == 0.1.0'
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of conflicting dependent version tvz/0.1.0 is denied since it is specified on command line as 'tvz == 0.1.0'
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('tvz' -> 'tvz == 0.1.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build: add tez/0.1.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_prerequisites: begin tez/0.1.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libbar->{toz/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {toz | libfoo->{toz/1,1}}!
+ trace: collect_build_postponed (4): begin {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (4): cfg-negotiate begin {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (4): cfg-negotiate end {toz | libbar->{toz/2,1}}!
+ trace: collect_build_postponed (4): end {toz | libbar->{toz/2,1}}
+ trace: collect_build_postponed (3): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version tez/0.1.0 is denied since it is specified on command line as 'tez == 0.1.0'
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tvz/0.1.0 tried earlier for same command line, skipping
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('tez' -> 'tez == 0.1.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {toz | libfoo->{toz/1,1}}!
+ trace: collect_build_postponed (4): begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (4): cfg-negotiate begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tez toz | libbar->{tez/3,1 toz/2,1}}!
+ trace: collect_build_postponed (4): end {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (3): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tez/0.1.0 tried earlier for same command line, skipping
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version tvz/1.0.0 with 0.1.0 by adding constraint 'tvz' -> 'tvz == 0.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {bax toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {tez | libbar->{tez/3,1}} into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent toz of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent tez/1.0.0 involves (being) negotiated configurations and results in {tez | libbox->{tez/1,1} libbar->{tez/3,1}}?, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent tez, refining configuration
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent toz of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1} libbar->{tez/3,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1} libbar->{tez/3,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tez/0.1.0 tried earlier for same command line, skipping
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of conflicting dependent version tvz/0.1.0 is denied since it is specified on command line as 'tvz == 0.1.0'
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('tvz' -> 'tvz == 0.1.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tvz of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {toz | libfoo->{toz/1,1}}!
+ trace: collect_build_postponed (4): begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (4): cfg-negotiate begin {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tez toz | libbar->{tez/3,1 toz/2,1}}!
+ trace: collect_build_postponed (4): end {tez toz | libbar->{tez/3,1 toz/2,1}}
+ trace: collect_build_postponed (3): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tez/0.1.0 tried earlier for same command line, skipping
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tvz/0.1.0 tried earlier for same command line, skipping
+ error: unable to satisfy constraints on package toz
+ info: tvz/1.0.0 depends on (toz == 0.2.0)
+ info: tez/1.0.0 depends on (toz == 0.1.0)
+ info: available toz/0.2.0
+ info: available toz/0.1.0
+ info: while satisfying tez/1.0.0
+ info: explicitly specify toz version to manually satisfy both constraints
+ %.*
+ EOE
+
+ $pkg_drop tvz
+ }
+
+ : restore-unsatisfied-depts
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar(c)
+ #
+ # toz/0.1.0:
+ #
+ # toz/0.2.0: depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ # tvz: depends: toz == 0.2.0 (c)
+ # depends: bax
+ # depends: libfoo(c)
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ $* tez 2>!;
+
+ $pkg_status -r >>EOO;
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ $* tvz/0.1.0 tez +{ config.tvz.extras=true } 2>&1 != 0 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tez of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {bax toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {tez | libbar->{tez/3,1}} into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent tez/1.0.0 involves (being) negotiated configurations and results in {tez | libbox->{tez/1,1} libbar->{tez/3,1}}?, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent tez, refining configuration
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1} libbar->{tez/3,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1} libbar->{tez/3,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replace unsatisfied dependent version tez/1.0.0 with 0.1.0 by adding constraint 'tez' -> 'tez == 0.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build: add tez/0.1.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_prerequisites: begin tez/0.1.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tez of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {bax toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/0.1.0
+ trace: collect_build_prerequisites: resume tez/0.1.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/0.1.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/0.1.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/0.1.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of unsatisfied dependent version tez/0.1.0 is denied since it is specified on command line as 'tez == 0.1.0'
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of conflicting dependent version tvz/0.1.0 is denied since it is specified on command line as 'tvz == 0.1.0'
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('tez' -> 'tez == 0.1.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent tez of dependency toz
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {bax toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {tez | libbar->{tez/3,1}} into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent tez/1.0.0 involves (being) negotiated configurations and results in {tez | libbox->{tez/1,1} libbar->{tez/3,1}}?, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent tez, refining configuration
+ trace: collect_build_postponed (2): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {tez | libbox->{tez/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tez of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | libbox->{tez/1,1} libbar->{tez/3,1}}!
+ trace: collect_build_postponed (3): begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1} libbar->{tez/3,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): end {bax toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: execute_plan: while configuring dependent tez in simulation mode unconstrain (toz == 0.1.0)
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tez/0.1.0 tried earlier for same command line, skipping
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of conflicting dependent version tvz/0.1.0 is denied since it is specified on command line as 'tvz == 0.1.0'
+ error: unable to satisfy constraints on package toz
+ info: tvz/0.1.0 depends on (toz == 0.2.0)
+ command line requires (tvz == 0.1.0)
+ info: tez/1.0.0 depends on (toz == 0.1.0)
+ info: available toz/0.2.0
+ info: available toz/0.1.0
+ info: while satisfying tez/1.0.0
+ info: explicitly specify toz version to manually satisfy both constraints
+ %.*
+ EOE
+
+ $pkg_drop tez
+ }
+ }
+
+ : cycle
+ :
+ {
+ +$clone_cfg
+
+ : direct
+ :
+ {
+ +$clone_cfg
+
+ : args-tex-tix
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tex tix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: add {tix 1,1: libbar} to {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex tix | libbar->{tex/1,1 tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex tix | libbar->{tex/1,1 tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tix/1.0.0
+ trace: pkg_build: dep-postpone user-specified tex
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tex tix
+ }
+
+ : args-tix
+ :
+ : As above but with different command-line arguments, which results
+ : in a different cluster list at the moment of cycle detection.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tix
+ }
+
+ : args-tex-tix-tux
+ :
+ : Here tux requires tix/0.1.0, which has no dependencies.
+ :
+ {
+ $clone_cfg;
+
+ $* tex tix tux 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build: add tux/1.0.0
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: add {tix 1,1: libbar} to {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_prerequisites: begin tux/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tux/1.0.0
+ trace: postponed_configurations::add: create {tux | libbox->{tux/1,1}}
+ trace: collect_build_prerequisites: postpone tux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex tix | libbar->{tex/1,1 tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex tix | libbar->{tex/1,1 tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build: add tux/1.0.0
+ trace: pkg_build: dep-postpone user-specified tex
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_prerequisites: begin tux/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tux/1.0.0
+ trace: postponed_configurations::add: create {tux | libbox->{tux/1,1}}
+ trace: collect_build_prerequisites: postpone tux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tux/1.0.0
+ trace: collect_build_prerequisites: resume tux/1.0.0
+ %.*
+ trace: collect_build: pick tix/0.1.0 over tix/1.0.0
+ trace: collect_build: tix/1.0.0 package version needs to be replaced with tix/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: apply version replacement for tix/1.0.0
+ trace: collect_build: replacement: tix/0.1.0
+ trace: collect_build: add tix/0.1.0
+ trace: collect_build: add tux/1.0.0
+ trace: pkg_build: dep-postpone user-specified tex
+ trace: collect_build_prerequisites: begin tix/0.1.0
+ trace: collect_build_prerequisites: end tix/0.1.0
+ trace: collect_build_prerequisites: begin tux/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tux/1.0.0
+ trace: postponed_configurations::add: create {tux | libbox->{tux/1,1}}
+ trace: collect_build_prerequisites: postpone tux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tux/1.0.0
+ trace: collect_build_prerequisites: resume tux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: no cfg-clause for dependency tix/0.1.0 of dependent tux/1.0.0
+ trace: collect_build_prerequisites: end tux/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tux | libbox->{tux/1,1}}!
+ %.*
+ trace: collect_build_postponed (1): erase bogus postponement tex
+ trace: collect_build_postponed (1): bogus postponements erased, throwing
+ trace: pkg_build: collection failed due to bogus dependency collection postponement cancellation, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: apply version replacement for tix/1.0.0
+ trace: collect_build: replacement: tix/0.1.0
+ trace: collect_build: add tix/0.1.0
+ trace: collect_build: add tux/1.0.0
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tix/0.1.0
+ trace: collect_build_prerequisites: end tix/0.1.0
+ trace: collect_build_prerequisites: begin tux/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tux/1.0.0
+ trace: postponed_configurations::add: create {tux | libbox->{tux/1,1}}
+ trace: collect_build_prerequisites: postpone tux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex | libbar->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex | libbar->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tex | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tux | libbox->{tux/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tux/1.0.0
+ trace: collect_build_prerequisites: resume tux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: no cfg-clause for dependency tix/0.1.0 of dependent tux/1.0.0
+ trace: collect_build_prerequisites: end tux/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tux | libbox->{tux/1,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tux | libbox->{tux/1,1}}
+ trace: collect_build_postponed (1): end {tex | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !tux configured 1.0.0
+ libbox configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop tex tix tux
+ }
+
+ : args-tex-tiz
+ :
+ : Note that tiz is a correct version of tix, which fixes the
+ : configuration cycle.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ $* tex tiz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: pkg_build: dep-postpone user-specified tex
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tiz | tex->{tiz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tiz | tex->{tiz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tiz | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (2): begin {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tex tiz | libbar->{tex/1,1 tiz/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ trace: collect_build_postponed (1): end {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tex tiz
+ }
+
+ : args-tiz
+ :
+ : Note that tiz is a correct version of tix, which fixes the
+ : configuration cycle.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ $* tiz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tiz | tex->{tiz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tiz | tex->{tiz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tiz | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (2): begin {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tex tiz | libbar->{tex/1,1 tiz/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tex tiz | libbar->{tex/1,1 tiz/2,1}}
+ trace: collect_build_postponed (1): end {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tiz
+ }
+
+ : depends-depends-conflict
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # toz: depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ $* tex toz/0.2.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build: add toz/0.2.0
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex | libbar->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex | libbar->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tex | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tex toz | libfoo->{tex/2,1 toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {tex toz | libfoo->{tex/2,1 toz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {tex | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tex toz | libfoo->{tex/2,1 toz/1,1}}!
+ trace: collect_build_postponed (2): end {tex toz | libfoo->{tex/2,1 toz/1,1}}
+ trace: collect_build_postponed (1): end {tex | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured !0.2.0 available 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tex toz
+ }
+
+ : package-depends-conflict
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # baz: depends: {libbar libfoo} (c)
+ #
+ # bac: depends: libbar(c)
+ # depends: libbaz(c)
+ # depends: libfoo(c)
+ #
+ # bat: depends: libbaz(c)
+ #
+ $* baz bac bat 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build: add bac/1.0.0
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin baz/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent baz/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent baz/1.0.0
+ trace: postponed_configurations::add: create {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/1.0.0
+ trace: collect_build_prerequisites: begin bac/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: add {bac 1,1: libbar} to {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone bac/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bac baz | libbar->{bac/1,1 baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {bac baz | libbar->{bac/1,1 baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: add {bac 2,1: libbaz} to {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bac/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bac baz | libbar->{bac/1,1 baz/1,1} libfoo->{baz/1,1}}!
+ trace: collect_build_postponed (2): begin {bac bat | libbaz->{bac/2,1 bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {bac bat | libbaz->{bac/2,1 bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: add {bac 3,1: libfoo} to {bac baz | libbar->{bac/1,1 baz/1,1} libfoo->{baz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bac/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent bac/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bac/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bac bat | libbaz->{bac/2,1 bat/1,1}}!
+ trace: collect_build_postponed (2): end {bac bat | libbaz->{bac/2,1 bat/1,1}}
+ trace: collect_build_postponed (1): end {bac baz | libbar->{bac/1,1 baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bac configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop baz bac bat
+ }
+
+ : existing
+ :
+ {
+ +$clone_cfg
+
+ : dependency-new-downgrade
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tex --verbose 1 2>!;
+
+ # Build new dependency of an existing dependent.
+ #
+ $* tix 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tix | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ tix | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new tix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ # Downgrade the existing dependency of an existing dependent.
+ #
+ $* tex/0.3.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tex/0.3.0
+ %.*
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tix/1.0.0 due to dependency tex/0.3.0
+ trace: collect_build: add tix/1.0.0
+ trace: postponed_configurations::add: create {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build: pick tex/0.3.0 over tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/0.3.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix^ | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent tix of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/0.3.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/0.3.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix^ | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/0.3.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/0.3.0 is already (being) recursively collected, skipping
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/0.3.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/0.3.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/0.3.0
+ trace: collect_build_prerequisites: resume tex/0.3.0
+ trace: collect_build_prerequisites: end tex/0.3.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade tex/0.3.0
+ config.tex.extras=true (set by tix)
+ reconfigure tix/1.0.0 (dependent of libbar, tex)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop tex tix
+ }
+
+ : dependency-downgrade
+ :
+ : Note that here we also specify the existing dependent tix on the
+ : command line to make sure that its noop recursive collection
+ : doesn't prevent it from being properly re-evaluated afterwards.
+ :
+ : Also note that tex/0.1.0 doesn't depend on libbar.
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tex tix --verbose 1 2>!;
+
+ $* tix tex/0.1.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build: add tex/0.1.0
+ trace: collect_build_prerequisites: skip configured tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tix/1.0.0 due to dependency tex/0.1.0
+ trace: postponed_configurations::add: create {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build: pick tex/0.1.0 over tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/0.1.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix^ | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent tix of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/0.1.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/0.1.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/0.1.0
+ trace: collect_build_prerequisites: resume tex/0.1.0
+ trace: collect_build_prerequisites: end tex/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/1,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/1,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade tex/0.1.0
+ config.tex.extras=true (set by tix)
+ reconfigure/update tix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop tex tix
+ }
+
+ : dependency-downgrade-unhold
+ :
+ : As above but the dependency is also unheld.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tex tix --verbose 1 2>!;
+
+ $* tix ?tex/0.1.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: skip configured tix/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: tex/1.0.0: update to tex/0.1.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ %.*
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tix/1.0.0 due to dependency tex/0.1.0
+ trace: postponed_configurations::add: create {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build: pick tex/0.1.0 over tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/0.1.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix^ | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent tix of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/0.1.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/0.1.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/0.1.0
+ trace: collect_build_prerequisites: resume tex/0.1.0
+ trace: collect_build_prerequisites: end tex/0.1.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/1,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/1,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade/unhold tex/0.1.0
+ config.tex.extras=true (set by tix)
+ reconfigure/update tix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop tix
+ }
+
+ : dependency-downgrade-unhold-premature
+ :
+ : As above but the dependency (tex/0.2.0) depends on libbar without
+ : configuration clause.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ $* tex tix --verbose 1 2>!;
+
+ $* tix ?tex/0.2.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: skip configured tix/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: tex/1.0.0: update to tex/0.2.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ %.*
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tix/1.0.0 due to dependency tex/0.2.0
+ trace: postponed_configurations::add: create {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix^ | libbar->{tix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ %.*
+ trace: collect_build: pick tex/0.2.0 over tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/0.2.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix^ | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent tix of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/0.2.0
+ %.*
+ trace: collect_build_prerequisites: dep-postpone dependency libbar/1.0.0 of dependent tex/0.2.0 since already in cluster {tix^ | libbar->{tix/1,1}}!
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/0.2.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/0.2.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/0.2.0
+ trace: collect_build_prerequisites: resume tex/0.2.0
+ trace: collect_build_prerequisites: end tex/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tix^ | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ downgrade/unhold tex/0.2.0
+ config.tex.extras=true (set by tix)
+ reconfigure/update tix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop tix
+ }
+ }
+ }
+
+ : indirect
+ :
+ {
+ +$clone_cfg
+
+ : args-tax-dex-dix
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tax: depends: libbar(c)
+ # depends: libfoo
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # dix: depends: libbar(c)
+ # depends: libbox(c) # causes postponement so that the initial cluster finishes negotiating first
+ # depends: dox(c)
+ #
+ $* tax dex dix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: add {dix 1,1: libbar} to {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_postponed (2): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: add {dix 1,1: libbar} to {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_postponed (2): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (3): begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | dox->{dix/3,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dix | libbox->{dix/2,1}}!
+ trace: collect_build_postponed (4): begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (5): begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (5): cfg-negotiate begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (dex), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: pkg_build: dep-postpone user-specified dex
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: add {dix 1,1: libbar} to {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dix tax | libbar->{dix/1,1 tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_postponed (2): begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | dox->{dix/3,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dix | libbox->{dix/2,1}}!
+ trace: collect_build_postponed (3): begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build_prerequisites: end dix/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dix | dox->{dix/3,1}}!
+ trace: collect_build_postponed (4): begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {dix tax | libbar->{dix/1,1 tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): end {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (2): end {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (1): end {dix tax | libbar->{dix/1,1 tax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dix configured 1.0.0
+ dox configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop tax dex dix --drop-dependent
+ }
+
+ : args-dix
+ :
+ : As above but with different command-line arguments, which results
+ : in a different cluster list at the moment of the cycle detection.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # dix: depends: libbar(c)
+ # depends: libbox(c) # causes postponement so the initial cluster finishes negotiating
+ # depends: dox(c)
+ #
+ $* dix 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix | libbar->{dix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dix | libbar->{dix/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dix | libbar->{dix/1,1}}!
+ trace: collect_build_postponed (2): begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dix | libbox->{dix/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | dox->{dix/3,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dix | libbox->{dix/2,1}}!
+ trace: collect_build_postponed (3): begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {dix | dox->{dix/3,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build_prerequisites: end dix/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dix | dox->{dix/3,1}}!
+ trace: collect_build_postponed (4): begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {dix | libbar->{dix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): end {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (2): end {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (1): end {dix | libbar->{dix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dix configured 1.0.0
+ dox configured 1.0.0
+ dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop dix
+ }
+
+ : args-tax-dex-diz
+ :
+ : Note that diz is a corrected version of dix, which fixes the
+ : configuration cycle.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tax: depends: libbar(c)
+ # depends: libfoo
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # diz: depends: dox(c)
+ # depends: libbox(c)
+ # depends: libbar(c)
+ #
+ $* tax dex diz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin diz/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {tax | libbar->{tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin diz/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {tax | libbar->{tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (3): begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (dex), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: begin tax/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tax/1.0.0
+ trace: postponed_configurations::add: create {tax | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: postpone tax/1.0.0
+ trace: pkg_build: dep-postpone user-specified dex
+ trace: collect_build_prerequisites: begin diz/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {tax | libbar->{tax/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tax | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {tax | libbar->{tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent diz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent diz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end diz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {diz | libbox->{diz/2,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {diz tax | libbar->{diz/3,1 tax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (3): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (2): end {diz | dox->{diz/1,1}}
+ trace: collect_build_postponed (1): end {tax | libbar->{tax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop tax dex diz --drop-dependent
+ }
+
+ : args-diz
+ :
+ : Note that diz is a corrected version of dix which fixes the
+ : configuration cycle.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # diz: depends: dox(c)
+ # depends: libbox(c)
+ # depends: libbar(c)
+ #
+ $* diz 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: begin diz/1.0.0
+ %.*
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {diz | dox->{diz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ %.*
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {diz | dox->{diz/1,1}}!
+ trace: collect_build_postponed (2): begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (3): begin {diz | libbox->{diz/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbar->{diz/3,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {diz | libbox->{diz/2,1}}!
+ trace: collect_build_postponed (4): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {diz | libbar->{diz/3,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (5): begin {bar diz | libbar->{bar/1,1 diz/3,1}}
+ %.*
+ trace: collect_build_postponed (5): cfg-negotiate begin {bar diz | libbar->{bar/1,1 diz/3,1}}
+ %.*
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build_prerequisites: end diz/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {bar diz | libbar->{bar/1,1 diz/3,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ %.*
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {bar diz | libbar->{bar/1,1 diz/3,1}}
+ trace: collect_build_postponed (4): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (3): end {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (2): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (1): end {diz | dox->{diz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop diz
+ }
+
+ : depends-depends-conflict
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # buc: depends: libfoo(c)
+ # depends: bux(c)
+ #
+ # bux: depends: libbar(c)
+ #
+ $* bar dex buc bux 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add buc/1.0.0
+ trace: collect_build: add bux/1.0.0
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: create {bar | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bar), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add buc/1.0.0
+ trace: collect_build: add bux/1.0.0
+ trace: pkg_build: dep-postpone user-specified bar
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin buc/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent buc/1.0.0
+ trace: postponed_configurations::add: create {buc | libfoo->{buc/1,1}}
+ trace: collect_build_prerequisites: postpone buc/1.0.0
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0
+ trace: postponed_configurations::add: create {bux | libbar->{bux/1,1}}
+ trace: collect_build_prerequisites: postpone bux/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bux | libbar->{bux/1,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: add {dex 2,1: libfoo} to {buc | libfoo->{buc/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (2): begin {buc dex | libfoo->{buc/1,1 dex/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {buc dex | libfoo->{buc/1,1 dex/2,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent buc/1.0.0
+ trace: collect_build_prerequisites: resume buc/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bux/1.0.0 of dependent buc/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bux), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add buc/1.0.0
+ trace: collect_build: add bux/1.0.0
+ trace: pkg_build: dep-postpone user-specified bar
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_prerequisites: begin buc/1.0.0
+ %.*
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent buc/1.0.0
+ trace: postponed_configurations::add: create {buc | libfoo->{buc/1,1}}
+ trace: collect_build_prerequisites: postpone buc/1.0.0
+ trace: pkg_build: dep-postpone user-specified bux
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dex | bar->{dex/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: create {bar | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: postpone bar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: add {dex 2,1: libfoo} to {buc | libfoo->{buc/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (2): begin {buc dex | libfoo->{buc/1,1 dex/2,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {buc dex | libfoo->{buc/1,1 dex/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent buc/1.0.0
+ trace: collect_build_prerequisites: resume buc/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency bux/1.0.0 of dependent buc/1.0.0
+ trace: postponed_configurations::add: create {buc | bux->{buc/2,1}}
+ trace: collect_build_prerequisites: postpone buc/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {buc dex | libfoo->{buc/1,1 dex/2,1}}!
+ trace: collect_build_postponed (3): begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {bar | libbar->{bar/1,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bar | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (4): begin {buc | bux->{buc/2,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {buc | bux->{buc/2,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bux/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bux/1.0.0
+ trace: postponed_configurations::add: add {bux 1,1: libbar} to {bar | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bux/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bux/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bux/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent buc/1.0.0
+ trace: collect_build_prerequisites: resume buc/1.0.0
+ trace: collect_build_prerequisites: end buc/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {buc | bux->{buc/2,1}}!
+ trace: collect_build_postponed (4): end {buc | bux->{buc/2,1}}
+ trace: collect_build_postponed (3): end {bar | libbar->{bar/1,1}}
+ trace: collect_build_postponed (2): end {buc dex | libfoo->{buc/1,1 dex/2,1}}
+ trace: collect_build_postponed (1): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by buc, dex)
+ config.libfoo.extras=true (set by buc)
+ new libbar/1.0.0 (required by bar, bux)
+ config.libbar.extras=true (set by bar)
+ new bar/1.0.0
+ config.bar.extras=true (set by dex)
+ new dex/1.0.0
+ new bux/1.0.0
+ config.bux.extras=true (set by buc)
+ new buc/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop bar dex buc bux
+ }
+
+ : package-depends-conflict
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # baz: depends: {libbar libfoo} (c)
+ #
+ # bas: depends: libbar(c)
+ # depends: bus (c)
+ #
+ # bus: depends: libbaz(c)
+ # depends: foo (c)
+ #
+ # foo: depends: libfoo(c)
+ #
+ # bat: depends: libbaz(c)
+ #
+ $* baz bas bus foo bat 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin baz/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent baz/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent baz/1.0.0
+ trace: postponed_configurations::add: create {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: add {bas 1,1: libbar} to {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | libbaz->{bus/1,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: add {bat 1,1: libbaz} to {bus | libbaz->{bus/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bas baz foo | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {bas baz foo | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bus), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin baz/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent baz/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent baz/1.0.0
+ trace: postponed_configurations::add: create {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: add {bas 1,1: libbar} to {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: pkg_build: dep-postpone user-specified bus
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bas baz foo | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {bas baz foo | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1 foo/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bas baz foo | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1 foo/1,1}}!
+ trace: collect_build_postponed (2): begin {bat | libbaz->{bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {bat | libbaz->{bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_postponed (3): begin {bas | bus->{bas/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ %.*
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (foo), retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add baz/1.0.0
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin baz/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent baz/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent baz/1.0.0
+ trace: postponed_configurations::add: create {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone baz/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: add {bas 1,1: libbar} to {baz | libbar->{baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: pkg_build: dep-postpone user-specified bus
+ trace: pkg_build: dep-postpone user-specified foo
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ %.*
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): cfg-negotiate begin {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent baz/1.0.0
+ trace: collect_build_prerequisites: resume baz/1.0.0
+ trace: collect_build_prerequisites: end baz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}!
+ trace: collect_build_postponed (2): begin {bat | libbaz->{bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): cfg-negotiate begin {bat | libbaz->{bat/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_postponed (3): begin {bas | bus->{bas/2,1}}
+ %.*
+ trace: collect_build_postponed (3): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ %.*
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | foo->{bus/2,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: end bas/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bas | bus->{bas/2,1}}!
+ trace: collect_build_postponed (4): begin {bus | foo->{bus/2,1}}
+ %.*
+ trace: collect_build_postponed (4): cfg-negotiate begin {bus | foo->{bus/2,1}}
+ %.*
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent foo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent foo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build_prerequisites: end bus/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {bus | foo->{bus/2,1}}!
+ trace: collect_build_postponed (4): end {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (3): end {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (2): end {bat | libbaz->{bat/1,1}}
+ trace: collect_build_postponed (1): end {bas baz | libbar->{bas/1,1 baz/1,1} libfoo->{baz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop baz bas bus foo bat
+ }
+
+ : existing
+ :
+ {
+ +$clone_cfg
+
+ : negotiate
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bar: depends: libbar(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # dox: dex(c)
+ #
+ # dix: depends: libbar(c)
+ # depends: libbox(c) # causes postponement and initial cluster finished negotiating
+ # depends: dox(c)
+ #
+ $* dex --verbose 1 2>!;
+
+ $* dix 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {dix | libbar->{dix/1,1}}
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_prerequisites: reeval bar/1.0.0
+ trace: postponed_configurations::add: add {bar^ 1,1: libbar} to {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bar/1.0.0 results in {bar^ dix | libbar->{bar/1,1 dix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ dix | libbar->{bar/1,1 dix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ dix | libbar->{bar/1,1 dix/1,1}}!
+ trace: collect_build_postponed (2): begin {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | dox->{dix/3,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dix | libbox->{dix/2,1}}!
+ trace: collect_build_postponed (3): begin {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build_prerequisites: end dix/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dix | dox->{dix/3,1}}!
+ trace: collect_build_postponed (4): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (4): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bar), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add dix/1.0.0
+ trace: collect_build_prerequisites: begin dix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {dix | libbar->{dix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build: add bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {dix | libbar->{dix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | libbox->{dix/2,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {dix | libbar->{dix/1,1}}!
+ trace: collect_build_postponed (2): begin {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of dependent dix/1.0.0
+ trace: postponed_configurations::add: create {dix | dox->{dix/3,1}}
+ trace: collect_build_prerequisites: postpone dix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {dix | libbox->{dix/2,1}}!
+ trace: collect_build_postponed (3): begin {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dix/1.0.0
+ trace: collect_build_prerequisites: resume dix/1.0.0
+ trace: collect_build_prerequisites: end dix/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dix | dox->{dix/3,1}}!
+ trace: collect_build_postponed (4): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (4): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent dex of dependency bar
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {dix | libbar->{dix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): skip being built existing dependent dex of dependency libfoo
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): end {dix | dox->{dix/3,1}}
+ trace: collect_build_postponed (2): end {dix | libbox->{dix/2,1}}
+ trace: collect_build_postponed (1): end {dix | libbar->{dix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libbox/1.0.0 (required by dix)
+ config.libbox.extras=true (set by dix)
+ new dox/1.0.0 (required by dix)
+ config.dox.extras=true (set by dix)
+ new dix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dix configured 1.0.0
+ dox configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ # @@ Note that if --drop-dependent is unspecified, the following
+ # command fails with:
+ #
+ # following dependent packages will have to be dropped as well:
+ # dox (requires dex)
+ # error: refusing to drop dependent packages with just --yes
+ # info: specify --drop-dependent to confirm
+ #
+ # Feels wrong.
+ #
+ $pkg_drop --drop-dependent dex dix
+ }
+ }
+ }
+ }
+
+ : unaccept-alternative
+ :
+ {
+ +$clone_cfg
+
+ : unacceptable
+ :
+ {
+ $clone_cfg;
+
+ $* foo fox/0.1.0 ?libbar 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/0.1.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/0.1.0
+ trace: collect_build_prerequisites: alt-postpone dependent fox/0.1.0 since max index is reached: 0
+ info: dependency alternative: libfoo
+ {
+ prefer
+ {
+ config.libfoo.extras = true
+ }
+ accept (false)
+ }
+ trace: collect_build_prerequisites: postpone fox/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed fox/0.1.0
+ trace: collect_build_prerequisites: resume fox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/0.1.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: unable to cfg-negotiate dependency alternative 1,1 for dependent fox/0.1.0, throwing unaccept_alternative
+ trace: pkg_build: collection failed due to unacceptable alternative, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/0.1.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/0.1.0
+ trace: collect_build_prerequisites: dependency alternative 1,1 for dependent fox/0.1.0 is unacceptable, skipping
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent fox/0.1.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end fox/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbar/1.0.0: unchanged
+ %.*
+ trace: evaluate_dependency: libbar/1.0.0: unchanged
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo)
+ config.libfoo.extras=true (set by foo)
+ new foo/1.0.0
+ config.foo.libfoo_extras=true (set by foo)
+ new libbar/1.0.0 (required by fox)
+ new fox/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured !0.1.0 available 1.0.0 0.2.0
+ libbar configured 1.0.0
+ EOO
+
+ $pkg_drop foo fox
+ }
+
+ : negotiation-cycle1
+ :
+ {
+ $clone_cfg;
+
+ $* foo fox/0.2.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/0.2.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/0.2.0
+ trace: collect_build_prerequisites: alt-postpone dependent fox/0.2.0 since max index is reached: 0
+ info: dependency alternative: libfoo
+ {
+ prefer
+ {
+ config.libfoo.extras = false
+ }
+ accept (!$config.libfoo.extras)
+ }
+ trace: collect_build_prerequisites: postpone fox/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed fox/0.2.0
+ trace: collect_build_prerequisites: resume fox/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/0.2.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent fox/0.2.0 involves (being) negotiated configurations and results in {foo fox | libfoo->{foo/1,1 fox/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {foo | libfoo->{foo/1,1}} failed due to dependent fox, refining configuration
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed fox/0.2.0
+ trace: collect_build_prerequisites: resume fox/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/0.2.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent fox/0.2.0 involves (being) negotiated configurations and results in {foo fox | libfoo->{foo/1,1 fox/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {foo | libfoo->{foo/1,1}} failed due to dependent fox, refining configuration
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed fox/0.2.0
+ trace: collect_build_prerequisites: resume fox/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/0.2.0
+ trace: postponed_configurations::add: add {fox 1,1: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: unable to cfg-negotiate dependency alternative 1,1 for dependent fox/0.2.0, throwing unaccept_alternative
+ trace: pkg_build: collection failed due to unacceptable alternative, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build: add fox/0.2.0
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_prerequisites: begin fox/0.2.0
+ trace: collect_build_prerequisites: dependency alternative 1,1 for dependent fox/0.2.0 is unacceptable, skipping
+ trace: collect_build_prerequisites: alt-postpone dependent fox/0.2.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ prefer
+ {
+ config.libfoo.extras = true
+ }
+ accept (true)
+ }
+ trace: collect_build_prerequisites: postpone fox/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (1): index 1 collect alt-postponed fox/0.2.0
+ trace: collect_build_prerequisites: resume fox/0.2.0
+ trace: collect_build_prerequisites: dependency alternative 1,1 for dependent fox/0.2.0 is unacceptable, skipping
+ trace: collect_build_prerequisites: alt-postpone dependent fox/0.2.0 since max index is reached: 1
+ info: dependency alternative: libfoo
+ {
+ prefer
+ {
+ config.libfoo.extras = true
+ }
+ accept (true)
+ }
+ trace: collect_build_prerequisites: postpone fox/0.2.0
+ trace: collect_build_postponed (1): index 2 collect alt-postponed fox/0.2.0
+ trace: collect_build_prerequisites: resume fox/0.2.0
+ trace: collect_build_prerequisites: dependency alternative 1,1 for dependent fox/0.2.0 is unacceptable, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent fox/0.2.0
+ trace: postponed_configurations::add: add {fox 1,2: libfoo} to {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent fox/0.2.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent fox/0.2.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end fox/0.2.0
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo, fox)
+ config.libfoo.extras=true (set by foo)
+ new foo/1.0.0
+ config.foo.libfoo_extras=true (set by foo)
+ new fox/0.2.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo fox
+ }
+
+ : negotiation-cycle2
+ :
+ : Similar to the above but performs one more negotiation half-cycle (see
+ : negotiate_configuration() for details).
+ :
+ {
+ $clone_cfg;
+
+ # Note that this 'one more half-cycle' case doesn't always reproduce
+ # and depends on the negotiate_configuration() calls order for the fox
+ # and foo dependents. This order is not actually deterministic since it
+ # depends on the order of these packages in the postponed_packages set
+ # (of the set<build_package*> type). Thus, the 'one more half-cycle'
+ # case only takes place if the order of the packages as they appear on
+ # the command line is preserved.
+ #
+ $* fox/0.2.0 foo/0.2.0 ?libfoo 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add fox/0.2.0
+ trace: collect_build: add foo/0.2.0
+ trace: collect_build_prerequisites: begin fox/0.2.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent fox/0.2.0 since max index is reached: 0
+ info: dependency alternative: libfoo
+ {
+ prefer
+ {
+ config.libfoo.extras = false
+ }
+
+ accept (!$config.libfoo.extras)
+ }
+ trace: collect_build_prerequisites: postpone fox/0.2.0
+ trace: collect_build_prerequisites: begin foo/0.2.0
+ %.*
+ trace: collect_build_prerequisites: alt-postpone dependent foo/0.2.0 since max index is reached: 0
+ info: dependency alternative: libfoo
+ {
+ require
+ {
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone foo/0.2.0
+ %.*
+ %.+: unable to cfg-negotiate dependency alternative 1,1 for dependent fox/0.2.0, throwing unaccept_alternative%
+ trace: pkg_build: collection failed due to unacceptable alternative, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build_prerequisites: dependency alternative 1,1 for dependent fox/0.2.0 is unacceptable, skipping
+ %.*
+ build plan:
+ new libfoo/1.0.0 (required by foo, fox)
+ config.libfoo.extras=true (set by foo)
+ new fox/0.2.0
+ new foo/0.2.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !fox configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ !foo configured !0.2.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop foo fox
+ }
+ }
+
+ : skip-existing-dependent
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ $* tix ?libfoo/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: skip configured tix/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libfoo/0.1.0: update to libfoo/1.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: skip configured tix/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tix/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tix/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tix/1.0.0
+ trace: postponed_configurations::add: add {tix^ 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tix/1.0.0 results in {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | tex->{tix/2,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tix of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tex^ tix^ | libbar->{tex/1,1 tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix | tex->{tix/2,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix | tex->{tix/2,1}}
+ trace: collect_build_postponed (1): end {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (set by tex)
+ reconfigure/update tex/1.0.0 (required by tix)
+ config.tex.extras=true (set by tix)
+ config.tex.libfoo_extras=true (set by tex)
+ reconfigure/update tix/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop tix
+ }
+
+ : skip-existing-dependent-last
+ :
+ : Similar to the above but the skipped existing dependent is the last/only
+ : dependent in the cluster.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ $* tiz ?libfoo/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: skip configured tiz/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libfoo/0.1.0: update to libfoo/1.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: skip configured tiz/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libfoo/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent tex/1.0.0 involves (being) negotiated configurations and results in {tex^ | libbar->{tex/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tex^ | libbar->{tex/1,1}} failed due to dependent tex, refining configuration
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (1): end {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (set by tex)
+ reconfigure/update tex/1.0.0 (required by tiz)
+ config.tex.extras=true (set by tiz)
+ config.tex.libfoo_extras=true (set by tex)
+ reconfigure/update tiz/1.0.0
+ config.tiz.tex_extras=true (set by tiz)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop tiz
+ }
+
+ : skip-existing-dependent-cluster
+ :
+ : Similar to skip-existing-dependent but skip the existing dependent
+ : because it is already in a cluster as a dependency.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bac: depends: libbar(c)
+ # depends: libbaz(c)
+ # depends: libfoo(c)
+ #
+ # bas: depends: libbar(c)
+ # depends: bus(c)
+ #
+ # bus: depends: libbaz(c)
+ # depends: foo(c)
+ #
+ # foo: depends: libfoo(c)
+ #
+ # box: depends: {libbar libfoo} (c) | libbox
+ #
+ $* bac bas 2>!;
+
+ $pkg_status -r >>EOO;
+ !bac configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ !bas configured 1.0.0
+ bus configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ $* box 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: begin box/1.0.0
+ trace: collect_build_prerequisites: alt-postpone dependent box/1.0.0 since max index is reached: 0
+ info: dependency alternative: {libbar libfoo}
+ {
+ require
+ {
+ config.libbar.extras = true
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone box/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): index 1 collect alt-postponed box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent box/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent box/1.0.0
+ trace: postponed_configurations::add: create {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bac/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bac/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bas/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bas/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bac/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bac/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build: add bac/1.0.0
+ trace: collect_build_prerequisites: reeval bac/1.0.0
+ trace: postponed_configurations::add: add {bac^ 1,1: libbar} to {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bac/1.0.0 results in {bac^ box | libbar->{bac/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bac/1.0.0
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build_prerequisites: reeval bas/1.0.0
+ trace: postponed_configurations::add: add {bas^ 1,1: libbar} to {bac^ box | libbar->{bac/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bas/1.0.0 results in {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bas/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {bac^ bas^ box foo^ | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1 foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bac^ bas^ box foo^ | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1 foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: create {bac | libbaz->{bac/2,1}}
+ trace: collect_build_prerequisites: postpone bac/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bac^ bas^ box foo^ | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1 foo/1,1}}!
+ trace: collect_build_postponed (2): begin {bac | libbaz->{bac/2,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bac of dependency libbaz
+ trace: collect_build_postponed (2): skip existing dependent bus of dependency libbaz since dependent already in cluster {bas | bus->{bas/2,1}} (as a dependency)
+ trace: collect_build_postponed (2): cfg-negotiate begin {bac | libbaz->{bac/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: add {bac 3,1: libfoo} to {bac^ bas^ box foo^ | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1 foo/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bac/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent bac/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bac/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bac | libbaz->{bac/2,1}}!
+ trace: collect_build_postponed (3): begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent bas of dependency bus
+ trace: collect_build_postponed (3): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bac | libbaz->{bac/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (foo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: begin box/1.0.0
+ trace: collect_build_prerequisites: alt-postpone dependent box/1.0.0 since max index is reached: 0
+ info: dependency alternative: {libbar libfoo}
+ {
+ require
+ {
+ config.libbar.extras = true
+ config.libfoo.extras = true
+ }
+ }
+ trace: collect_build_prerequisites: postpone box/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (0): index 1 collect alt-postponed box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent box/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent box/1.0.0
+ trace: postponed_configurations::add: create {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bac/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bac/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bas/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bas/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bac/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bac/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent foo of dependency libfoo
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build: add bac/1.0.0
+ trace: collect_build_prerequisites: reeval bac/1.0.0
+ trace: postponed_configurations::add: add {bac^ 1,1: libbar} to {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bac/1.0.0 results in {bac^ box | libbar->{bac/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bac/1.0.0
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build_prerequisites: reeval bas/1.0.0
+ trace: postponed_configurations::add: add {bas^ 1,1: libbar} to {bac^ box | libbar->{bac/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bas/1.0.0 results in {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bas/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: create {bac | libbaz->{bac/2,1}}
+ trace: collect_build_prerequisites: postpone bac/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bac | libbaz->{bac/2,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bac of dependency libbaz
+ trace: collect_build_postponed (2): skip existing dependent bus of dependency libbaz since dependent already in cluster {bas | bus->{bas/2,1}} (as a dependency)
+ trace: collect_build_postponed (2): cfg-negotiate begin {bac | libbaz->{bac/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbaz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bac/1.0.0
+ trace: collect_build_prerequisites: resume bac/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bac/1.0.0
+ trace: postponed_configurations::add: add {bac 3,1: libfoo} to {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{box/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bac/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent bac/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bac/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bac | libbaz->{bac/2,1}}!
+ trace: collect_build_postponed (3): begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent bas of dependency bus
+ trace: collect_build_postponed (3): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bac | libbaz->{bac/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | foo->{bus/2,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: end bas/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bas | bus->{bas/2,1}}!
+ trace: collect_build_postponed (4): begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent bus of dependency foo
+ trace: collect_build_postponed (4): cfg-negotiate begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bac^ bas^ box | libbar->{bac/1,1 bas/1,1 box/1,1} libfoo->{bac/3,1 box/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent foo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent foo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build_prerequisites: end bus/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {bus | foo->{bus/2,1}}!
+ trace: collect_build_postponed (4): end {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (3): end {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (2): end {bac | libbaz->{bac/2,1}}
+ trace: collect_build_postponed (1): end {box | libbar->{box/1,1} libfoo->{box/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new box/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_drop bac bas box
+ }
+
+ : replace-configured-dependency
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # diz: depends: dox(c)
+ # depends: libbox(c)
+ # depends: libbar(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # bar: depends: libbar(c)
+ #
+ $* diz libbar/0.1.0 ?libbox/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ cat cfg/libbox-0.1.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbox.extras = true
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: postponed_configurations::add: add {bar^ 1,1: libbar} to {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bar/1.0.0 results in {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bar/1.0.0
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bar), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/0.1.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent diz/1.0.0 involves (being) negotiated configurations and results in {bar^ diz | libbar->{bar/1,1 diz/3,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bar^ | libbar->{bar/1,1}} failed due to dependent diz, refining configuration
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/0.1.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/0.1.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent diz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent diz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end diz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {diz | libbox->{diz/2,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent dex of dependency bar
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bar^ diz | libbar->{bar/1,1 diz/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): skip being built existing dependent dex of dependency libfoo
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (3): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (2): end {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (1): end {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbox/0.1.0: update to libbox/1.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbox/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: postponed_configurations::add: add {bar^ 1,1: libbar} to {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bar/1.0.0 results in {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bar/1.0.0
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbox/1.0.0 over libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bar), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbox/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbox/1.0.0 over libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent diz/1.0.0 involves (being) negotiated configurations and results in {bar^ diz | libbar->{bar/1,1 diz/3,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bar^ | libbar->{bar/1,1}} failed due to dependent diz, refining configuration
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbox/1.0.0 over libbox/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent diz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent diz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end diz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {diz | libbox->{diz/2,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent dex of dependency bar
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bar^ diz | libbar->{bar/1,1 diz/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): skip being built existing dependent dex of dependency libfoo
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (3): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (2): end {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (1): end {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libbar/1.0.0
+ config.libbar.extras=true (set by diz)
+ reconfigure/update bar/1.0.0 (required by dex)
+ config.bar.extras=true (set by dex)
+ reconfigure/update dex/1.0.0 (required by dox)
+ config.dex.extras=true (set by dox)
+ reconfigure/update dox/1.0.0 (required by diz)
+ config.dox.extras=true (set by diz)
+ upgrade libbox/1.0.0
+ config.libbox.extras=true (set by diz)
+ reconfigure/update diz/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ cat cfg/libbox-1.0.0/build2/config.build2 >>~%EOO%;
+ %.*
+ config.libbox.extras = true
+ EOO
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ EOO
+
+ $pkg_drop diz libbar --drop-dependent
+ }
+
+ : multiple-dependency-postpones
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # diz: depends: dox(c)
+ # depends: libbox(c)
+ # depends: libbar(c)
+ #
+ # dox: depends: dex(c)
+ #
+ # dex: depends: bar(c)
+ # depends: libfoo(c)
+ #
+ # bar: depends: libbar(c)
+ #
+ $* diz dex libbar/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: skip configured dex/1.0.0
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: postponed_configurations::add: add {bar^ 1,1: libbar} to {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bar/1.0.0 results in {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bar/1.0.0
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (dex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: pkg_build: dep-postpone user-specified dex
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: postponed_configurations::add: add {bar^ 1,1: libbar} to {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bar/1.0.0 results in {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bar/1.0.0
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bar/1.0.0
+ trace: collect_build_prerequisites: resume bar/1.0.0
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bar), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add dex/1.0.0
+ trace: collect_build: add diz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent bar/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add bar/1.0.0
+ trace: postponed_configurations::add: create {bar^ | libbar->{bar/1,1}}
+ trace: collect_build: add dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dox/1.0.0 of existing dependent diz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {diz^ | dox->{diz/1,1}}
+ trace: pkg_build: dep-postpone user-specified dex
+ trace: collect_build_prerequisites: skip configured diz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent diz/1.0.0 involves (being) negotiated configurations and results in {bar^ diz | libbar->{bar/1,1 diz/3,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {bar^ | libbar->{bar/1,1}} failed due to dependent diz, refining configuration
+ trace: collect_build_postponed (1): begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bar/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bar/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval diz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated diz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar of dependency libbar
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_prerequisites: reeval diz/1.0.0
+ trace: postponed_configurations::add: add {diz^ 1,1: dox} to {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent diz/1.0.0 results in {diz^ | dox->{diz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated diz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bar
+ trace: collect_build_postponed (1): cfg-negotiate end {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_postponed (2): begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent diz of dependency dox
+ trace: collect_build_postponed (2): cfg-negotiate begin {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency dex/1.0.0 of dependent dox/1.0.0
+ trace: postponed_configurations::add: create {dox | dex->{dox/1,1}}
+ trace: collect_build_prerequisites: postpone dox/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: create {diz | libbox->{diz/2,1}}
+ trace: collect_build_prerequisites: postpone diz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {diz^ | dox->{diz/1,1}}!
+ trace: collect_build_postponed (3): begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent dox of dependency dex
+ trace: collect_build_postponed (3): cfg-negotiate begin {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin dex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bar/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | bar->{dex/1,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent dox/1.0.0
+ trace: collect_build_prerequisites: resume dox/1.0.0
+ trace: collect_build_prerequisites: end dox/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {dox | dex->{dox/1,1}}!
+ trace: collect_build_postponed (4): begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent diz of dependency libbox
+ trace: collect_build_postponed (4): cfg-negotiate begin {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent diz/1.0.0
+ trace: collect_build_prerequisites: resume diz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent diz/1.0.0
+ trace: postponed_configurations::add: add {diz 3,1: libbar} to {bar^ | libbar->{bar/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent diz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent diz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end diz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {diz | libbox->{diz/2,1}}!
+ trace: collect_build_postponed (5): begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent dex of dependency bar
+ trace: collect_build_postponed (5): cfg-negotiate begin {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bar/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bar/1.0.0
+ trace: postponed_configurations::add: add {bar 1,1: libbar} to {bar^ diz | libbar->{bar/1,1 diz/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bar/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bar/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bar/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent dex/1.0.0
+ trace: postponed_configurations::add: create {dex | libfoo->{dex/2,1}}
+ trace: collect_build_prerequisites: postpone dex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {dex | bar->{dex/1,1}}!
+ trace: collect_build_postponed (6): begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): skip being built existing dependent dex of dependency libfoo
+ trace: collect_build_postponed (6): cfg-negotiate begin {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (6): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (6): select cfg-negotiated dependency alternative for dependent dex/1.0.0
+ trace: collect_build_prerequisites: resume dex/1.0.0
+ trace: collect_build_prerequisites: end dex/1.0.0
+ trace: collect_build_postponed (6): cfg-negotiate end {dex | libfoo->{dex/2,1}}!
+ trace: collect_build_postponed (6): end {dex | libfoo->{dex/2,1}}
+ trace: collect_build_postponed (5): end {dex | bar->{dex/1,1}}
+ trace: collect_build_postponed (4): end {diz | libbox->{diz/2,1}}
+ trace: collect_build_postponed (3): end {dox | dex->{dox/1,1}}
+ trace: collect_build_postponed (2): end {diz^ | dox->{diz/1,1}}
+ trace: collect_build_postponed (1): end {bar^ | libbar->{bar/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libbar/1.0.0
+ config.libbar.extras=true (set by diz)
+ reconfigure/update bar/1.0.0 (required by dex)
+ config.bar.extras=true (set by dex)
+ reconfigure/update dex/1.0.0
+ config.dex.extras=true (set by dox)
+ reconfigure/update dox/1.0.0 (required by diz)
+ config.dox.extras=true (set by diz)
+ reconfigure/update diz/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !diz configured 1.0.0
+ dox configured 1.0.0
+ !dex configured 1.0.0
+ bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop diz dex libbar --drop-dependent
+ }
+
+ : collected-dependency-non-negotiated-cluster
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ $* tiz libbar/0.1.0 --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of existing dependent tex/1.0.0 due to dependency libbar/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of existing dependent tiz/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: skip configured tiz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip existing dependent tex of dependency libbar since dependent already in cluster {tiz^ | tex->{tiz/1,1}} (as a dependency)
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz^ 1,1: tex} to {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent tex/1.0.0 involves (being) negotiated configurations and results in {tex^ | libbar->{tex/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tex^ | libbar->{tex/1,1}} failed due to dependent tex, refining configuration
+ trace: collect_build_postponed (1): begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip existing dependent tex of dependency libbar since dependent already in cluster {tiz^ | tex->{tiz/1,1}} (as a dependency)
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz^ 1,1: tex} to {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex
+ trace: collect_build_postponed (1): cfg-negotiate end {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_postponed (2): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex^ | libbar->{tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (1): end {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libbar/1.0.0
+ config.libbar.extras=true (set by tex)
+ reconfigure/update tex/1.0.0 (required by tiz)
+ config.tex.extras=true (set by tiz)
+ config.tex.libfoo_extras=true (set by tex)
+ reconfigure/update tiz/1.0.0
+ config.tiz.tex_extras=true (set by tiz)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured 1.0.0
+ tex configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tiz libbar --drop-dependent
+ }
+
+ : from-scratch-refinement-drop
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # toz: depends: libbaz(c)
+ # depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar (c)
+ #
+ # toz/0.1.0:
+ #
+ $* tiz toz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* tez 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tez | libbar->{tex/1,1 tez/3,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {tez | libbar->{tez/3,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tez | libbar->{tez/3,1}}!
+ trace: collect_build_postponed (4): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tez | libbar->{tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex tez | libbar->{tex/1,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (5): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (5): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (5): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (5): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (4): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {tez | libbar->{tez/3,1}}
+ trace: collect_build_postponed (2): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (1): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbaz/1.0.0: unused
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_drop: add libbaz
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tez | libbar->{tex/1,1 tez/3,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_drop: add libbaz
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbox/1.0.0
+ trace: collect_build_prerequisites: end libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {tez | libbar->{tez/3,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | libbar->{tez/3,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tez | libbar->{tez/3,1}}!
+ trace: collect_build_postponed (4): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tez | libbar->{tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex tez | libbar->{tex/1,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (5): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (5): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (5): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (5): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (4): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {tez | libbar->{tez/3,1}}
+ trace: collect_build_postponed (2): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (1): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new libbox/1.0.0 (required by tez)
+ config.libbox.extras=true (set by tez)
+ drop libbaz/1.0.0 (unused)
+ downgrade toz/0.1.0 (required by tez)
+ config.toz.extras=true (set by tez)
+ new tez/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ $pkg_drop tiz toz tez
+ }
+
+ : from-scratch-refinement-upgrade
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ #
+ # toz/0.1.0:
+ #
+ # tuz: depends: toz (c)
+ #
+ # tix: depends: libbar(c)
+ # depends: tex(c)
+ #
+ # toz/1.0.0: depends: libbaz(c)
+ # depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ $* tiz tez/0.1.0 tuz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tez configured !0.1.0 available 1.0.0
+ libbox configured 1.0.0
+ toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ $* tix ?tez ?toz 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tix | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: add {tix 2,1: tex} to {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex tix | libbar->{tex/1,1 tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: tez/0.1.0: unused
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_drop: overwrite tez
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tix | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_drop: overwrite tez
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: add {tix 2,1: tex} to {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (2): cfg-negotiate begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex tix | libbar->{tex/1,1 tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}!
+ trace: collect_build_postponed (3): begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (3): cfg-negotiate begin {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tex | libfoo->{tex/2,1}}!
+ trace: collect_build_postponed (3): end {tex | libfoo->{tex/2,1}}
+ trace: collect_build_postponed (2): end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbox/1.0.0: unused
+ %.*
+ trace: evaluate_dependency: toz/0.1.0: update to toz/1.0.0
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_drop: overwrite tez
+ trace: collect_drop: add libbox
+ trace: collect_build_prerequisites: skip being dropped existing dependent tez of dependency toz
+ trace: collect_build_prerequisites: pre-reeval tuz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tuz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/1.0.0 of existing dependent tuz/1.0.0 due to dependency toz/1.0.0
+ trace: collect_build: add tuz/1.0.0
+ trace: postponed_configurations::add: create {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ tix | libbar->{tex/1,1 tix/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tix/1.0.0
+ trace: collect_build_prerequisites: begin tix/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: create {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_drop: overwrite tez
+ trace: collect_drop: add libbox
+ trace: collect_build_prerequisites: skip being dropped existing dependent tez of dependency toz
+ trace: collect_build_prerequisites: pre-reeval tuz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tuz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/1.0.0 of existing dependent tuz/1.0.0 due to dependency toz/1.0.0
+ trace: collect_build: add tuz/1.0.0
+ trace: postponed_configurations::add: create {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tix | libbar->{tix/1,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tix/1.0.0
+ trace: postponed_configurations::add: add {tix 2,1: tex} to {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tix/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tix | libbar->{tix/1,1}}!
+ trace: collect_build_postponed (2): begin {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_postponed (2): skip being dropped existing dependent tez of dependency toz
+ trace: collect_build_prerequisites: pre-reeval tuz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tuz/1.0.0: 1,1
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_prerequisites: reeval tuz/1.0.0
+ trace: collect_build: pick toz/1.0.0 over toz/0.1.0
+ trace: postponed_configurations::add: add {tuz^ 1,1: toz} to {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tuz/1.0.0 results in {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tuz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: create {toz | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tuz/1.0.0
+ trace: collect_build_prerequisites: resume tuz/1.0.0
+ trace: collect_build_prerequisites: end tuz/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tuz^ | toz->{tuz/1,1}}!
+ trace: collect_build_postponed (3): begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (3): cfg-negotiate begin {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tix | libbar->{tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tix/1.0.0
+ trace: collect_build_prerequisites: resume tix/1.0.0
+ trace: collect_build_prerequisites: end tix/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {tex tix | libbar->{tex/1,1 tix/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}!
+ trace: collect_build_postponed (4): begin {toz | libbaz->{toz/1,1}}
+ trace: collect_build_postponed (4): cfg-negotiate begin {toz | libbaz->{toz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libbaz/1.0.0
+ trace: collect_build_prerequisites: end libbaz/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent toz/1.0.0
+ trace: collect_build_prerequisites: resume toz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: add {toz 2,1: libfoo} to {tex | libfoo->{tex/2,1}}
+ trace: collect_build_prerequisites: postpone toz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {toz | libbaz->{toz/1,1}}!
+ trace: collect_build_postponed (5): begin {tex toz | libfoo->{tex/2,1 toz/2,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (5): cfg-negotiate begin {tex toz | libfoo->{tex/2,1 toz/2,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent toz/1.0.0
+ trace: collect_build_prerequisites: resume toz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/1.0.0
+ trace: postponed_configurations::add: add {toz 3,1: libbar} to {tex tix tiz | libbar->{tex/1,1 tix/1,1 tiz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent toz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent toz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end toz/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {tex toz | libfoo->{tex/2,1 toz/2,1}}!
+ trace: collect_build_postponed (5): end {tex toz | libfoo->{tex/2,1 toz/2,1}}
+ trace: collect_build_postponed (4): end {toz | libbaz->{toz/1,1}}
+ trace: collect_build_postponed (3): end {tix tiz^ | tex->{tix/2,1 tiz/1,1}}
+ trace: collect_build_postponed (2): end {tuz^ | toz->{tuz/1,1}}
+ trace: collect_build_postponed (1): end {tix | libbar->{tix/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ drop libbox/1.0.0 (unused)
+ new libbaz/1.0.0 (required by toz)
+ config.libbaz.extras=true (set by toz)
+ new tix/1.0.0
+ upgrade toz/1.0.0
+ config.toz.extras=true (set by tuz)
+ drop tez/0.1.0 (unused)
+ reconfigure tuz/1.0.0 (dependent of toz)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tuz configured 1.0.0
+ toz configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ !tix configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tiz tuz tix
+ }
+
+ : reeval-cycle-resolution
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # box/0.1.0: depends: libbox(c)
+ #
+ $* bax tex --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* box/0.2.0 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add box/0.2.0
+ trace: collect_build_prerequisites: begin box/0.2.0
+ %.*
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent box/0.2.0
+ trace: postponed_configurations::add: create {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ %.*
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ %.*
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: postponed_configurations::add: merge {tex^ | libbar->{tex/1,1}} into {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box tex^ | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box tex^ | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 involves negotiated configurations and results in {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ %.*
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: merge configuration cycle detected for being re-evaluated dependent tex/1.0.0 since {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}! is a shadow of itself, throwing merge_configuration_cycle
+ trace: collect_build_postponed (2): re-evaluation of existing dependent tex/1.0.0 failed due to merge configuration cycle for {bax^ | libfoo->{bax/1,1}}, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tex/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: postponed_configurations::add: merge {tex | libbar->{tex/1,1}} into {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box tex | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box tex | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ %.*
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ %.*
+ trace: collect_build_prerequisites: cfg-postponing dependent tex/1.0.0 involves (being) negotiated configurations and results in {box tex | libbox->{box/1,1} libbar->{tex/1,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to dependent tex, refining configuration
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ %.*
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ %.*
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ %.*
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ %.*
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): begin {bax^ tex | libfoo->{bax/1,1 tex/2,1}}
+ %.*
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ tex | libfoo->{bax/1,1 tex/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ %.*
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box tex | libbox->{box/1,1} libbar->{tex/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tex/1.0.0
+ trace: collect_build_prerequisites: resume tex/1.0.0
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ tex | libfoo->{bax/1,1 tex/2,1}}!
+ trace: collect_build_postponed (2): end {bax^ tex | libfoo->{bax/1,1 tex/2,1}}
+ trace: collect_build_postponed (1): end {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new box/0.2.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !box configured !0.2.0 available 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop bax tex box
+ }
+
+ : up-negotiate-not-collected-existing-dependents
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # box/0.1.0: depends: libbox(c)
+ #
+ $* bax tiz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* box/0.2.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add box/0.2.0
+ trace: collect_build_prerequisites: begin box/0.2.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent box/0.2.0
+ trace: postponed_configurations::add: create {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: postponed_configurations::add: merge {tex^ | libbar->{tex/1,1}} into {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box tex^ | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box tex^ | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 involves negotiated configurations and results in {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: merge configuration cycle detected for being re-evaluated dependent tex/1.0.0 since {box tex^ | libbox->{box/1,1} libbar->{tex/1,1}}! is a shadow of itself, throwing merge_configuration_cycle
+ trace: collect_build_postponed (2): re-evaluation of existing dependent tex/1.0.0 failed due to merge configuration cycle for {bax^ | libfoo->{bax/1,1}}, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tex/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: postponed_configurations::add: merge {tex | libbar->{tex/1,1}} into {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box tex | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box tex | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {box | libbox->{box/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postponing dependent tex/1.0.0 adds not (being) collected dependencies libbar/1.0.0 with not (being) collected existing dependents to (being) negotiated cluster and results in {box tex | libbox->{box/1,1} libbar->{tex/1,1}}!, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tiz/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add box/0.2.0
+ trace: collect_build_prerequisites: begin box/0.2.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent box/0.2.0
+ trace: postponed_configurations::add: create {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: postpone box/0.2.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip dep-postponed existing dependent tex of dependency libfoo
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 adds not (being) collected dependencies libbar/1.0.0 with not (being) collected existing dependents to (being) negotiated cluster and results in {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tex/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tiz/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tiz of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {box | libbox->{box/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (1): begin {box | libbox->{box/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {box | libbox->{box/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/0.2.0
+ trace: collect_build_prerequisites: resume box/0.2.0
+ trace: collect_build_prerequisites: end box/0.2.0
+ trace: collect_build_postponed (1): cfg-negotiate end {box | libbox->{box/1,1}}!
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {box | libbox->{box/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tiz of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_postponed (2): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (3): begin {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (3): cfg-negotiate begin {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {bax box | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {bax box tex | libbox->{bax/2,1 box/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tiz | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (3): end {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (2): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): end {box | libbox->{box/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new box/0.2.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !box configured !0.2.0 available 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ $pkg_drop bax tiz box
+ }
+
+ : up-negotiate-not-collected-existing-dependents2
+ :
+ : Similar to the above but more complicated.
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # tiz: depends: tex(c)
+ # depends: libbar(c)
+ #
+ # tex: depends: libbar(c)
+ # depends: libfoo(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar(c)
+ #
+ # tvz: depends: toz == 0.2.0 (c)
+ #
+ # toz/0.1.0:
+ #
+ $* bax tiz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* tez 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {tex^ | libbar->{tex/1,1}} into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax tex^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax tex^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tez | libbox->{tez/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 involves negotiated configurations and results in {tex^ tez | libbox->{tez/1,1} libbar->{tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {tex^ tez | libbox->{tez/1,1} libbar->{tex/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: add {tex^ 1,1: libbar} to {tez | libbox->{tez/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: merge configuration cycle detected for being re-evaluated dependent tex/1.0.0 since {tex^ tez | libbox->{tez/1,1} libbar->{tex/1,1}}! is a shadow of itself, throwing merge_configuration_cycle
+ trace: collect_build_postponed (2): re-evaluation of existing dependent tex/1.0.0 failed due to merge configuration cycle for {bax^ | libfoo->{bax/1,1}}, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tex/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {tex | libbar->{tex/1,1}} into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax tex tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax tex tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {tez | libbox->{tez/1,1}}! (shadow cluster-based)
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postponing dependent tex/1.0.0 adds not (being) collected dependencies libbar/1.0.0 with not (being) collected existing dependents to (being) negotiated cluster and results in {tex tez | libbox->{tez/1,1} libbar->{tex/1,1}}!, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tiz/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: create {tex | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone tex/1.0.0
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip dep-postponed existing dependent tex of dependency libfoo
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 adds not (being) collected dependencies libbar/1.0.0 with not (being) collected existing dependents to (being) negotiated cluster and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!, throwing recollect_existing_dependents
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to some existing dependents related problem, scheduling their re-collection
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tex/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent tiz/1.0.0
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tiz of dependency libbar
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_prerequisites: begin tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency tex/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: postpone tiz/1.0.0
+ trace: collect_build_postponed (1): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tex of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent tex of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: skip being built existing dependent tiz of dependency libbar
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_postponed (2): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (3): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (3): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (3): skip re-collection of dep-postponed package tex
+ trace: collect_build_postponed (4): begin {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {bax tex tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1 tez/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (4): end {tiz | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new toz/0.1.0 (required by tez)
+ config.toz.extras=true (set by tez)
+ new tez/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ # While at it, make sure that we are unable to build tvz directly, since it
+ + # requires toz version 0.2.0, which initially cannot be
+ + # satisfied. However, this gets automatically resolved by the unsatisfied
+ + # constraints resolution machinery.
+ #
+ $* tvz 2>&1 != 0 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz | libfoo->{toz/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tex^ | libbar->{bax/2,1 tex/1,1} libbox->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_postponed (3): begin {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip dep-postponed existing dependent tex of dependency libfoo
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz | libfoo->{toz/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbar
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 1,1: libbox} to {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {bax tex tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (4): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (2): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replace conflicting dependent version tvz/1.0.0 with 0.1.0 by adding constraint 'tvz' -> 'tvz == 0.1.0' on command line
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: skip configured bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tex^ | libbar->{bax/2,1 tex/1,1} libbox->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): begin {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/0.1.0
+ trace: collect_build_prerequisites: begin tvz/0.1.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency bax/1.0.0 of dependent tvz/0.1.0
+ trace: collect_build_prerequisites: skip configured bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tvz/0.1.0
+ trace: postponed_configurations::add: add {tvz 3,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip dep-postponed existing dependent tex of dependency libfoo
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tvz/0.1.0
+ trace: collect_build_prerequisites: resume tvz/0.1.0
+ trace: collect_build_prerequisites: end tvz/0.1.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_postponed (3): begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbar
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 1,1: libbox} to {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ toz tvz | libfoo->{bax/1,1 toz/1,1 tvz/3,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {bax tex tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (4): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (2): end {toz tvz | libfoo->{toz/1,1 tvz/3,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/0.1.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement of conflicting dependent version tvz/0.1.0 is denied since it is specified on command line as 'tvz == 0.1.0'
+ trace: pkg_build: cannot replace any package, rolling back latest command line adjustment ('tvz' -> 'tvz == 0.1.0')
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz | libfoo->{toz/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_prerequisites: reeval tex/1.0.0
+ trace: postponed_configurations::add: create {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tex/1.0.0 results in {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tex/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tex^ | libbar->{tex/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax tex^ | libbar->{bax/2,1 tex/1,1} libbox->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_postponed (3): begin {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbox
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax tex^ toz | libbar->{bax/2,1 tex/1,1 toz/2,1} libbox->{bax/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: collect_build_prerequisites: cannot re-evaluate existing dependent tiz/1.0.0 due to dependency tex/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (tex), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tvz/1.0.0
+ trace: collect_build_prerequisites: begin tvz/1.0.0
+ trace: collect_build: add toz/0.2.0
+ info: package tvz dependency on (toz == 0.2.0) is forcing upgrade of toz/0.1.0 to 0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tvz/1.0.0
+ trace: postponed_configurations::add: create {tvz | toz->{tvz/1,1}}
+ trace: collect_build_prerequisites: postpone tvz/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): skip unsatisfied existing dependent tez of dependency toz/0.2.0 due to constraint (toz == 0.1.0)
+ trace: collect_build_postponed (1): cfg-negotiate begin {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.2.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: create {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tvz/1.0.0
+ trace: collect_build_prerequisites: resume tvz/1.0.0
+ trace: collect_build_prerequisites: end tvz/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_postponed (2): begin {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip dep-postponed existing dependent tex of dependency libfoo
+ trace: collect_build: add tex/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {toz | libfoo->{toz/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {toz | libfoo->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ toz | libfoo->{bax/1,1 toz/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent toz/0.2.0
+ trace: postponed_configurations::add: add {toz 2,1: libbar} to {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone toz/0.2.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_postponed (3): begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: pre-reeval tez/1.0.0
+ trace: collect_build_prerequisites: re-evaluation of dependent tez/1.0.0 deviated for depends clause 2: now cannot select alternative, previously 1 was selected
+ trace: collect_build_postponed (3): schedule re-collection of deviated existing dependent tez/1.0.0
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: pre-reeval tex/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval tiz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tiz/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent tez of dependency libbar
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent tex of dependency libbar
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build: add tiz/1.0.0
+ trace: collect_build_prerequisites: reeval tiz/1.0.0
+ trace: postponed_configurations::add: create {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tiz/1.0.0 results in {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tiz/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent toz/0.2.0
+ trace: collect_build_prerequisites: resume toz/0.2.0
+ trace: collect_build_prerequisites: end toz/0.2.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 1,1: libbox} to {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build: postpone failure for dependent tez unsatisfied with dependency toz/0.2.0 (== 0.1.0)
+ trace: collect_build: pick toz/0.2.0 over toz/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.2.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 2,1: toz} to {tvz | toz->{tvz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency toz/0.2.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (4): begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent tiz of dependency tex
+ trace: collect_build_postponed (4): cfg-negotiate begin {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin tex/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 1,1: libbar} to {bax tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent tex/1.0.0
+ trace: postponed_configurations::add: add {tex 2,1: libfoo} to {bax^ toz | libfoo->{bax/1,1 toz/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tex/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent tex/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tex/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent tiz/1.0.0
+ trace: collect_build_prerequisites: resume tiz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tiz/1.0.0
+ trace: postponed_configurations::add: add {tiz 2,1: libbar} to {bax tex tez toz | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 tex/1,1 tez/3,1 toz/2,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tiz/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tiz/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tiz/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {tiz^ | tex->{tiz/1,1}}!
+ trace: collect_build_postponed (4): end {tiz^ | tex->{tiz/1,1}}
+ trace: collect_build_postponed (3): end {bax toz | libbox->{bax/2,1} libbar->{bax/2,1 toz/2,1}}
+ trace: collect_build_postponed (2): end {toz | libfoo->{toz/1,1}}
+ trace: collect_build_postponed (1): end {tvz | toz->{tvz/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: pkg_build: try to replace unsatisfactory dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace unsatisfied dependent tez/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependent: try to replace conflicting dependent tvz/1.0.0 of dependency toz/0.2.0 with some other version
+ trace: try_replace_dependency: replacement tvz/0.1.0 tried earlier for same command line, skipping
+ error: unable to satisfy constraints on package toz
+ info: tvz/1.0.0 depends on (toz == 0.2.0)
+ info: tez/1.0.0 depends on (toz == 0.1.0)
+ info: available toz/0.2.0
+ info: available toz/0.1.0
+ info: while satisfying tez/1.0.0
+ info: explicitly specify toz version to manually satisfy both constraints
+ %.*
+ EOE
+
+ $pkg_drop bax tiz tez
+ }
+
+ : merge-config-version-replacement
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # box: depends: {libbar libfoo} (c) | libbox
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # toz: depends: libbaz(c)
+ # depends: libfoo(c)
+ # depends: libbar(c)
+ #
+ # tez: depends: libbox(c)
+ # depends: toz == 0.1.0 (c)
+ # depends: libbar(c)
+ #
+ # toz/0.1.0:
+ #
+ $* box bax toz --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !box configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* tez 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (2): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {tez | libbox->{tez/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval toz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated toz/1.0.0: 1,1
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build: add toz/1.0.0
+ trace: collect_build_prerequisites: reeval toz/1.0.0
+ trace: postponed_configurations::add: create {toz^ | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent toz/1.0.0 results in {toz^ | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated toz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: pick toz/0.1.0 over toz/1.0.0
+ trace: collect_build: toz/1.0.0 package version needs to be replaced with toz/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: apply version replacement for toz/0.1.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (2): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {tez | libbox->{tez/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (1): skip expected to be built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip expected to be built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: apply version replacement for toz/0.1.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ box^ tez | libbox->{bax/2,1 tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{bax/2,1 box/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax^ box^ tez | libbox->{bax/2,1 tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{bax/2,1 box/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (2): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (1): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbaz/1.0.0: unused
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_drop: add libbaz
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (2): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {tez | libbox->{tez/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval toz/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated toz/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build: add toz/1.0.0
+ trace: collect_build_prerequisites: reeval toz/1.0.0
+ trace: postponed_configurations::add: create {toz^ | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent toz/1.0.0 results in {toz^ | libbaz->{toz/1,1}}
+ trace: collect_build_prerequisites: re-evaluated toz/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: pick toz/0.1.0 over toz/1.0.0
+ trace: collect_build: toz/1.0.0 package version needs to be replaced with toz/0.1.0
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add tez/1.0.0
+ trace: collect_build_prerequisites: begin tez/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_drop: add libbaz
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: apply version replacement for toz/0.1.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {tez | libbox->{tez/1,1}}!
+ trace: collect_build_postponed (2): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (2): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): skip being built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {tez | libbox->{tez/1,1}}!
+ trace: postponed_configurations::add: merge {bax^ box^ | libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? into {bax tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1}}!
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 merges non-negotiated and/or being negotiated configurations in and results in {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!, throwing merge_configuration
+ trace: collect_build_postponed (0): cfg-negotiation of {tez | libbox->{tez/1,1}} failed due to non-negotiated clusters, force-merging based on shadow cluster {bax box^ tez | libbox->{bax/2,1 tez/1,1} libbar->{bax/2,1 box/1,1} libfoo->{bax/1,1 box/1,1}}!
+ trace: collect_build_postponed (1): begin {tez | libbox->{tez/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {tez | libbox->{tez/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {tez | libbox->{tez/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_prerequisites: pre-reeval box/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated box/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (1): skip expected to be built existing dependent toz of dependency libfoo
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}}
+ trace: collect_build: add box/1.0.0
+ trace: collect_build_prerequisites: reeval box/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: postponed_configurations::add: add {box^ 1,1: libbar libfoo} to {bax^ tez | libbox->{tez/1,1} libfoo->{bax/1,1}} (shadow cluster-based)
+ trace: collect_build_prerequisites: re-evaluating dependent box/1.0.0 results in {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_prerequisites: re-evaluated box/1.0.0
+ trace: collect_build_postponed (1): skip being built existing dependent box of dependency libbar
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (1): skip expected to be built existing dependent toz of dependency libbar
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bax^ box^ tez | libbox->{tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{box/1,1}}? (shadow cluster-based)
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is shadow-negotiated
+ trace: collect_build_prerequisites: dependency libbox/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent box/1.0.0
+ trace: collect_build_prerequisites: resume box/1.0.0
+ trace: collect_build_prerequisites: end box/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build: apply version replacement for toz/0.1.0
+ trace: collect_build: replacement: toz/0.1.0
+ trace: collect_build: add toz/0.1.0
+ warning: package tez dependency on (toz == 0.1.0) is forcing downgrade of toz/1.0.0 to 0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency toz/0.1.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: create {tez | toz->{tez/2,1}}
+ trace: collect_build_prerequisites: postpone tez/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax^ box^ tez | libbox->{bax/2,1 tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{bax/2,1 box/1,1}}!
+ trace: collect_build_postponed (2): begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): cfg-negotiate begin {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin toz/0.1.0
+ trace: collect_build_prerequisites: end toz/0.1.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tez/1.0.0
+ trace: collect_build_prerequisites: resume tez/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent tez/1.0.0
+ trace: postponed_configurations::add: add {tez 3,1: libbar} to {bax^ box^ tez | libbox->{bax/2,1 tez/1,1} libfoo->{bax/1,1 box/1,1} libbar->{bax/2,1 box/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent tez/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent tez/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end tez/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tez | toz->{tez/2,1}}!
+ trace: collect_build_postponed (2): end {tez | toz->{tez/2,1}}
+ trace: collect_build_postponed (1): end {tez | libbox->{tez/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ drop libbaz/1.0.0 (unused)
+ downgrade toz/0.1.0 (required by tez)
+ config.toz.extras=true (set by tez)
+ new tez/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !box configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ EOO
+
+ $pkg_drop box bax toz tez
+ }
+
+ : recollect-dependent-bogus-dependency-postponement
+ :
+ {
+ $clone_cfg;
+
+ # Dependencies:
+ #
+ # bax: depends: libfoo(c)
+ # depends: {libbox libbar} (c)
+ #
+ # bas: depends: libbar(c)
+ # depends: bus(c)
+ #
+ # bus: depends: libbaz(c)
+ # depends: foo(c)
+ #
+ # foo: depends: libfoo(c)
+ #
+ # bat: depends: libbaz(c)
+ #
+ $* bax bas --verbose 1 2>!;
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !bas configured 1.0.0
+ bus configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ EOO
+
+ $* bat 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bat | libbaz->{bat/1,1}}
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_prerequisites: reeval bus/1.0.0
+ trace: postponed_configurations::add: add {bus^ 1,1: libbaz} to {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bus/1.0.0 results in {bat bus^ | libbaz->{bat/1,1 bus/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bus/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bat bus^ | libbaz->{bat/1,1 bus/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbaz/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | foo->{bus/2,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bat bus^ | libbaz->{bat/1,1 bus/1,1}}!
+ trace: collect_build_postponed (2): begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent bus of dependency foo
+ trace: collect_build_postponed (2): cfg-negotiate begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build_prerequisites: end bus/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bus | foo->{bus/2,1}}!
+ trace: collect_build_postponed (3): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (3): skip being built existing dependent foo of dependency libfoo
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {foo | libfoo->{foo/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ foo | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax^ foo | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax^ foo | libfoo->{bax/1,1 foo/1,1}}!
+ trace: collect_build_postponed (4): begin {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (4): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_prerequisites: pre-reeval bas/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bas/1.0.0: 1,1
+ trace: collect_build_postponed (4): re-evaluate existing dependents for {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build_prerequisites: reeval bas/1.0.0
+ trace: postponed_configurations::add: add {bas^ 1,1: libbar} to {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bas/1.0.0 results in {bas^ bax | libbox->{bax/2,1} libbar->{bas/1,1 bax/2,1}}
+ trace: collect_build_prerequisites: re-evaluated bas/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate begin {bas^ bax | libbox->{bax/2,1} libbar->{bas/1,1 bax/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (bus), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bus of dependency libbaz
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bat | libbaz->{bat/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbaz/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: pre-reeval bas/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bas/1.0.0: 1,1
+ trace: collect_build_postponed (1): schedule re-collection of existing dependent bas/1.0.0 due to bogus postponement of dependency bus
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bas of dependency libbar
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bas | libbar->{bas/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bas | libbar->{bas/1,1}}!
+ trace: collect_build_postponed (3): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {bax^ foo^ | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax^ foo^ | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bas | libbar->{bas/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bas bax | libbar->{bas/1,1 bax/2,1} libbox->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {bas | libbar->{bas/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (2): begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bas of dependency libbar
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bas | libbar->{bas/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bas | libbar->{bas/1,1}}!
+ trace: collect_build_postponed (3): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (3): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_prerequisites: reeval foo/1.0.0
+ trace: postponed_configurations::add: add {foo^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent foo/1.0.0 results in {bax^ foo^ | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_prerequisites: re-evaluated foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax^ foo^ | libfoo->{bax/1,1 foo/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bas | libbar->{bas/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent foo/1.0.0
+ trace: collect_build_prerequisites: resume foo/1.0.0
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax^ foo^ | libfoo->{bax/1,1 foo/1,1}}!
+ trace: collect_build_postponed (4): begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent bas of dependency bus
+ trace: collect_build_postponed (4): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (foo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add bat/1.0.0
+ trace: collect_build_prerequisites: begin bat/1.0.0
+ trace: collect_build: add libbaz/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bat/1.0.0
+ trace: postponed_configurations::add: create {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: postpone bat/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bat | libbaz->{bat/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bus/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bus/1.0.0: 1,1
+ trace: collect_build_postponed (1): skip dep-postponed existing dependent bus of dependency libbaz
+ trace: collect_build: add bus/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bat | libbaz->{bat/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbaz/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bat/1.0.0
+ trace: collect_build_prerequisites: resume bat/1.0.0
+ trace: collect_build_prerequisites: end bat/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: pre-reeval bas/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bas/1.0.0: 1,1
+ trace: collect_build_postponed (1): schedule re-collection of existing dependent bas/1.0.0 due to bogus postponement of dependency bus
+ trace: collect_build: add bas/1.0.0
+ trace: collect_build_prerequisites: begin bas/1.0.0
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bas of dependency libbar
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bas | libbar->{bas/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bas | libbar->{bas/1,1}}!
+ trace: collect_build_postponed (3): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent foo of dependency libfoo
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bas | libbar->{bas/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: cfg-postponing dependent bax/1.0.0 involves (being) negotiated configurations and results in {bas bax | libbar->{bas/1,1 bax/2,1} libbox->{bax/2,1}}!, throwing retry_configuration
+ trace: collect_build_postponed (1): cfg-negotiation of {bas | libbar->{bas/1,1}} failed due to dependent bax, refining configuration
+ trace: collect_build_postponed (2): begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (2): skip being built existing dependent bas of dependency libbar
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_postponed (2): re-evaluate existing dependents for {bas | libbar->{bas/1,1}}
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate begin {bas | libbar->{bas/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency bus/1.0.0 of dependent bas/1.0.0
+ trace: postponed_configurations::add: create {bas | bus->{bas/2,1}}
+ trace: collect_build_prerequisites: postpone bas/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bas | libbar->{bas/1,1}}!
+ trace: collect_build_postponed (3): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval foo/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated foo/1.0.0: 1,1
+ trace: collect_build_postponed (3): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (3): skip dep-postponed existing dependent foo of dependency libfoo
+ trace: collect_build: add foo/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (3): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (3): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: add {bax 2,1: libbox libbar} to {bas | libbar->{bas/1,1}}!
+ trace: collect_build_prerequisites: skip being built existing dependent bax of dependency libbox
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bax/1.0.0 is negotiated
+ trace: collect_build_prerequisites: collecting cfg-postponed dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: dependency libbar/1.0.0 of dependent bax/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (3): cfg-negotiate end {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_postponed (4): begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): skip being built existing dependent bas of dependency bus
+ trace: collect_build_postponed (4): cfg-negotiate begin {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin bus/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbaz/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: add {bus 1,1: libbaz} to {bat | libbaz->{bat/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent bus/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libbaz/1.0.0 of dependent bus/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: cfg-postpone dependency foo/1.0.0 of dependent bus/1.0.0
+ trace: postponed_configurations::add: create {bus | foo->{bus/2,1}}
+ trace: collect_build_prerequisites: postpone bus/1.0.0
+ trace: collect_build_postponed (4): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (4): select cfg-negotiated dependency alternative for dependent bas/1.0.0
+ trace: collect_build_prerequisites: resume bas/1.0.0
+ trace: collect_build_prerequisites: end bas/1.0.0
+ trace: collect_build_postponed (4): cfg-negotiate end {bas | bus->{bas/2,1}}!
+ trace: collect_build_postponed (5): begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (5): skip being built existing dependent bus of dependency foo
+ trace: collect_build_postponed (5): cfg-negotiate begin {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/1.0.0
+ trace: postponed_configurations::add: add {foo 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}!
+ trace: collect_build_prerequisites: configuration for cfg-postponed dependencies of dependent foo/1.0.0 is negotiated
+ trace: collect_build_prerequisites: dependency libfoo/1.0.0 of dependent foo/1.0.0 is already (being) recursively collected, skipping
+ trace: collect_build_prerequisites: end foo/1.0.0
+ trace: collect_build_postponed (5): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (5): select cfg-negotiated dependency alternative for dependent bus/1.0.0
+ trace: collect_build_prerequisites: resume bus/1.0.0
+ trace: collect_build_prerequisites: end bus/1.0.0
+ trace: collect_build_postponed (5): cfg-negotiate end {bus | foo->{bus/2,1}}!
+ trace: collect_build_postponed (5): end {bus | foo->{bus/2,1}}
+ trace: collect_build_postponed (4): end {bas | bus->{bas/2,1}}
+ trace: collect_build_postponed (3): end {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (2): end {bas | libbar->{bas/1,1}}
+ trace: collect_build_postponed (1): end {bat | libbaz->{bat/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ new bat/1.0.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !bas configured 1.0.0
+ bus configured 1.0.0
+ foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop bax bas bat
+ }
+
+ : recollect-pruned-collection
+ :
+ {
+ $clone_cfg;
+
+ $* libbar/0.1.0 libbiz/0.1.0 bax 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !libbiz configured !0.1.0 available 1.0.0
+ !bax configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* --upgrade --immediate 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add libbiz/1.0.0
+ trace: collect_build: add bax/1.0.0
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of existing dependent bax/1.0.0 due to dependency libbar/1.0.0
+ trace: postponed_configurations::add: create {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: begin libbiz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: no cfg-clause for dependency libbar/1.0.0 of dependent libbiz/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_prerequisites: end libbiz/1.0.0
+ trace: collect_build_prerequisites: skip configured bax/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: reeval bax/1.0.0
+ trace: postponed_configurations::add: add {bax^ 1,1: libfoo} to {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent bax/1.0.0 results in {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax^ | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libbar), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build: add libbiz/1.0.0
+ trace: collect_build: add bax/1.0.0
+ trace: pkg_build: dep-postpone user-specified libbar
+ trace: collect_build_prerequisites: begin libbiz/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: dep-postpone dependency libbar/1.0.0 of dependent libbiz/1.0.0
+ trace: collect_build_prerequisites: end libbiz/1.0.0
+ trace: collect_build_prerequisites: skip configured bax/1.0.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_prerequisites: pre-reeval bax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated bax/1.0.0: 1,1
+ trace: collect_build_postponed (0): schedule re-collection of existing dependent bax/1.0.0 due to bogus postponement of dependency libbar
+ trace: collect_build_prerequisites: begin bax/1.0.0
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libfoo->{bax/1,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (1): begin {bax | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): skip being built existing dependent bax of dependency libfoo
+ trace: collect_build_postponed (1): cfg-negotiate begin {bax | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build: add libbox/1.0.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbox/1.0.0 of dependent bax/1.0.0
+ trace: collect_build: pick libbar/1.0.0 over libbar/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libbar/1.0.0 of dependent bax/1.0.0
+ trace: postponed_configurations::add: create {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_prerequisites: postpone bax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate end {bax | libfoo->{bax/1,1}}!
+ trace: collect_build_postponed (2): begin {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbox
+ trace: collect_build_postponed (2): skip being built existing dependent bax of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbox/1.0.0
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent bax/1.0.0
+ trace: collect_build_prerequisites: resume bax/1.0.0
+ trace: collect_build_prerequisites: end bax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {bax | libbox->{bax/2,1} libbar->{bax/2,1}}!
+ trace: collect_build_postponed (2): end {bax | libbox->{bax/2,1} libbar->{bax/2,1}}
+ trace: collect_build_postponed (1): end {bax | libfoo->{bax/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libbar/1.0.0
+ config.libbar.extras=true (set by bax)
+ upgrade libbiz/1.0.0
+ reconfigure/update bax/1.0.0
+ config.bax.libfoo_extras=true (set by bax)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !libbiz configured 1.0.0
+ !libbar configured 1.0.0
+ !bax configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libbar libbiz bax
+ }
+
+ : optimization
+ :
+ {
+ +$clone_cfg
+
+ : dependency-after-config-clause
+ :
+ {
+ $clone_cfg;
+
+ $* tax ?libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $* ?libfoo 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libfoo/0.1.0: update to libfoo/1.0.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval tax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tax/1.0.0: 1,1 re-evaluation is optional
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ reconfigure tax (dependent of libfoo)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tax
+ }
+
+ : dependency-before-config-clause
+ :
+ {
+ $clone_cfg;
+
+ $* tex/0.2.0 ?libbar/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tex configured !0.2.0 available 1.0.0 0.3.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $* ?libbar 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: execute_plan: simulate: yes
+ trace: evaluate_dependency: libbar/0.1.0: update to libbar/1.0.0
+ trace: pkg_build: refine package collection/plan execution
+ trace: collect_build_prerequisites: pre-reeval tex/0.2.0
+ trace: collect_build_prerequisites: pre-reevaluated tex/0.2.0: 2,1 re-evaluation is optional
+ trace: collect_build_prerequisites: begin libbar/1.0.0
+ trace: collect_build_prerequisites: end libbar/1.0.0
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libbar/1.0.0
+ reconfigure tex (dependent of libbar)
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !tex configured !0.2.0 available 1.0.0 0.3.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tex
+ }
+
+ : other-config-clause
+ :
+ {
+ $clone_cfg;
+
+ $* tax ?libfoo/0.1.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ EOO
+
+ $* libfoo foo/0.1.0 2>&1 | $filter 2>>~%EOE%;
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build: add foo/0.1.0
+ trace: collect_build_prerequisites: pre-reeval tax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tax/1.0.0: 1,1 re-evaluation is optional
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_prerequisites: begin foo/0.1.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: cannot cfg-postpone dependency libfoo/1.0.0 of dependent foo/0.1.0 (collected prematurely), throwing postpone_dependency
+ trace: pkg_build: collection failed due to prematurely collected dependency (libfoo), retry from scratch
+ trace: pkg_build: refine package collection/plan execution from scratch
+ trace: collect_build: add libfoo/1.0.0
+ trace: collect_build: add foo/0.1.0
+ trace: pkg_build: dep-postpone user-specified libfoo
+ trace: collect_build_prerequisites: begin foo/0.1.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: cfg-postpone dependency libfoo/1.0.0 of dependent foo/0.1.0
+ trace: postponed_configurations::add: create {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: postpone foo/0.1.0
+ trace: collect_build_postponed (0): begin
+ trace: collect_build_postponed (1): begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_prerequisites: pre-reeval tax/1.0.0
+ trace: collect_build_prerequisites: pre-reevaluated tax/1.0.0: 1,1 re-evaluation is optional
+ trace: collect_build: add libbar/1.0.0
+ trace: collect_build_postponed (1): re-evaluate existing dependents for {foo | libfoo->{foo/1,1}}
+ trace: collect_build: add tax/1.0.0
+ trace: collect_build_prerequisites: reeval tax/1.0.0
+ trace: postponed_configurations::add: create {tax^ | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: re-evaluating dependent tax/1.0.0 results in {tax^ | libbar->{tax/1,1}}
+ trace: collect_build_prerequisites: re-evaluated tax/1.0.0
+ trace: collect_build_postponed (1): cfg-negotiate begin {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: begin libfoo/1.0.0
+ trace: collect_build_prerequisites: end libfoo/1.0.0
+ trace: collect_build_postponed (1): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (1): select cfg-negotiated dependency alternative for dependent foo/0.1.0
+ trace: collect_build_prerequisites: resume foo/0.1.0
+ trace: collect_build_prerequisites: end foo/0.1.0
+ trace: collect_build_postponed (1): cfg-negotiate end {foo | libfoo->{foo/1,1}}!
+ trace: collect_build_postponed (2): begin {tax^ | libbar->{tax/1,1}}
+ trace: collect_build_postponed (2): skip being built existing dependent tax of dependency libbar
+ trace: collect_build_postponed (2): cfg-negotiate begin {tax^ | libbar->{tax/1,1}}
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependencies
+ trace: collect_build_prerequisites: skip configured libbar/1.0.0
+ trace: collect_build_postponed (2): recursively collect cfg-negotiated dependents
+ trace: collect_build_postponed (2): select cfg-negotiated dependency alternative for dependent tax/1.0.0
+ trace: collect_build_prerequisites: resume tax/1.0.0
+ trace: collect_build: pick libfoo/1.0.0 over libfoo/0.1.0
+ trace: collect_build_prerequisites: dep-postpone dependency libfoo/1.0.0 of dependent tax/1.0.0
+ trace: collect_build_prerequisites: end tax/1.0.0
+ trace: collect_build_postponed (2): cfg-negotiate end {tax^ | libbar->{tax/1,1}}!
+ trace: collect_build_postponed (2): end {tax^ | libbar->{tax/1,1}}
+ trace: collect_build_postponed (1): end {foo | libfoo->{foo/1,1}}
+ trace: collect_build_postponed (0): end
+ trace: execute_plan: simulate: yes
+ %.*
+ build plan:
+ upgrade libfoo/1.0.0
+ config.libfoo.extras=true (set by foo)
+ reconfigure tax/1.0.0 (dependent of libfoo)
+ new foo/0.1.0
+ trace: execute_plan: simulate: no
+ %.*
+ EOE
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop tax foo libfoo
+ }
+ }
+
+ : repo-packages
+ :
+ : Don't match the tracing but just make sure that pkg-build doesn't crash
+ : or hang and ends up with an expected packages setup.
+ :
+ {
+ +$clone_cfg
+
+ # Note that we don't mach the configuration negotiation flows in the
+ # tests of this scope.
+ #
+ test.arguments += --verbose 1
+
+ # Convert specific warnings to infos since we expect them to
+ # appear. This, in particular, prevents bbot workers to set task result
+ # status to warning.
+ #
+ warn_to_info = [cmdline] sed -e 's/warning: (package .* is forcing .*)/info: \1/' >&2
+
+ : new-all
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo libbar ?libbaz/0.1.0 ?libbox/0.1.0 libbiz \
+ foo fox fux fix fex bar baz bac bat bas bus \
+ box bax bux bix bex boo biz buz buc tax tex \
+ tix tiz toz tez tuz tux dex dix diz dox 2>!;
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !libbiz configured 1.0.0
+ !libbar configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !bex configured 1.0.0
+ !libbar configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !tez configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libbar configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !fox configured 1.0.0
+ !libfoo configured 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured 1.0.0
+ !baz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !bac configured 1.0.0
+ !libbar configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !libfoo configured 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !libbar configured 1.0.0
+ !box configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !bax configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !libfoo configured 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tax configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ $pkg_drop libfoo libbar libbiz foo fox fux fix fex bar baz bac bat \
+ bas bus box bax bux bix bex boo biz buz buc tax tex tix tiz \
+ toz tez tuz tux dex dix diz dox
+ }
+
+ : upgrade
+ :
+ {
+ +$clone_cfg
+
+ : recursive
+ :
+ {
+ +$clone_cfg
+
+ : libs
+ :
+ {
+ $clone_cfg;
+
+ $* ?libfoo/0.1.0 libbar/0.1.0 ?libbaz/0.1.0 ?libbox/0.1.0 \
+ libbiz/0.1.0 foo fox fux fix fex bar baz bac bat bas bus \
+ box bax bux bix bex boo biz buz buc tax tex tix tiz toz \
+ tez tuz tux dex dix diz dox 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !libbiz configured !0.1.0 available 1.0.0
+ !bat configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !boo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !tez configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fox configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fux configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !baz configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bac configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !box configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bax configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tax configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $warn_to_info 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !libbiz configured 1.0.0
+ !libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !bex configured 1.0.0
+ !libbar configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !tez configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libbar configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fox configured 1.0.0
+ libfoo configured !1.0.0
+ !fux configured 1.0.0
+ libfoo configured !1.0.0
+ !baz configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !bac configured 1.0.0
+ !libbar configured 1.0.0
+ libbaz configured !1.0.0
+ libfoo configured !1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !libbar configured 1.0.0
+ !box configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !bax configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ libfoo configured !1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tax configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tiz configured 1.0.0
+ !libbar configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ EOO
+
+ $pkg_drop libfoo libbar libbiz foo fox fux fix fex bar baz bac \
+ bat bas bus box bax bux bix bex boo biz buz buc \
+ tax tex tix tiz toz tez tuz tux dex dix diz dox
+ }
+
+ : all
+ :
+ {
+ $clone_cfg;
+
+ $* ?libfoo/0.1.0 ?libbar/0.1.0 ?libbaz/0.1.0 ?libbox/0.1.0 \
+ ?libbiz/0.1.0 foo/0.1.0 fox/0.1.0 fux/0.1.0 fix/0.1.0 fex/0.1.0 \
+ bar/0.1.0 baz/0.1.0 bac/1.0.0 bat/1.0.0 bas/1.0.0 bus/0.1.0 \
+ box/0.1.0 bax/1.0.0 bux/1.0.0 bix/1.0.0 bex/1.0.0 boo/1.0.0 \
+ biz/0.1.0 buz/1.0.0 buc/1.0.0 tax/1.0.0 tex/0.1.0 tix/0.1.0 \
+ tiz/1.0.0 toz/0.1.0 tez/1.0.0 tuz/1.0.0 tux/1.0.0 dex/1.0.0 \
+ dix/1.0.0 diz/1.0.0 dox/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !bat configured !1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !biz configured !0.1.0 available 1.0.0
+ libbiz configured !0.1.0 available 1.0.0
+ !tix configured !0.1.0 available 1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !tuz configured !1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !box configured !0.1.0 available 1.0.0 0.2.0
+ libbox configured !0.1.0 available 1.0.0
+ !tux configured !1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !tix configured !0.1.0 available 1.0.0
+ !fox configured !0.1.0 available 1.0.0 0.2.0
+ libbar configured !0.1.0 available 1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bex configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !boo configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !buz configured !1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !tez configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !bix configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ !baz configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bac configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fix configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bus configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bas configured !1.0.0
+ !bus configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bax configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !buc configured !1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tax configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tex configured !0.1.0 available 1.0.0 0.3.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tiz configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !tex configured !0.1.0 available 1.0.0 0.3.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dix configured !1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !diz configured !1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --recursive 2>&1 | $warn_to_info 2>!;
+
+ $pkg_status -r >>EOO;
+ !bat configured 1.0.0
+ libbaz configured !1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !box configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tux configured 1.0.0
+ libbox configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !fox configured 1.0.0
+ libfoo configured !1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ !bex configured 1.0.0
+ libbar configured !1.0.0
+ !boo configured 1.0.0
+ libbar configured !1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ !tez configured 1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ libbar configured !1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fux configured 1.0.0
+ libfoo configured !1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ !baz configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !bac configured 1.0.0
+ libbar configured !1.0.0
+ libbaz configured !1.0.0
+ libfoo configured !1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ libbar configured !1.0.0
+ !bax configured 1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ libfoo configured !1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tax configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tex configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tiz configured 1.0.0
+ libbar configured !1.0.0
+ !tex configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ EOO
+
+ $pkg_drop foo fox fux fix fex bar baz bac bat bas bus box bax bux \
+ bix bex boo biz buz buc tax tex tix tiz toz tez tuz tux \
+ dex dix diz dox
+ }
+ }
+
+ : immediate
+ :
+ {
+ +$clone_cfg
+
+ : libs
+ :
+ {
+ $clone_cfg;
+
+ $* ?libfoo/0.1.0 libbar/0.1.0 ?libbaz/0.1.0 ?libbox/0.1.0 \
+ libbiz/0.1.0 foo fox fux fix fex bar baz bac bat bas bus \
+ box bax bux bix bex boo biz buz buc tax tex tix tiz toz \
+ tez tuz tux dex dix diz dox 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured !0.1.0 available 1.0.0
+ !libbiz configured !0.1.0 available 1.0.0
+ !bat configured 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !boo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !tez configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fox configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fux configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !baz configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bac configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !box configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bax configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tax configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ !tex configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --immediate 2>&1 | $warn_to_info 2>!;
+
+ $pkg_status -r >>EOO;
+ !libbar configured 1.0.0
+ !libbiz configured 1.0.0
+ !libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !bex configured 1.0.0
+ !libbar configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !tez configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libbar configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fox configured 1.0.0
+ libfoo configured !1.0.0
+ !fux configured 1.0.0
+ libfoo configured !1.0.0
+ !baz configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !bac configured 1.0.0
+ !libbar configured 1.0.0
+ libbaz configured !1.0.0
+ libfoo configured !1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !libbar configured 1.0.0
+ !box configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !bax configured 1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ libfoo configured !1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tax configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !tiz configured 1.0.0
+ !libbar configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured !1.0.0
+ !libbar configured 1.0.0
+ libbox configured !1.0.0
+ EOO
+
+ $pkg_drop libfoo libbar libbiz foo fox fux fix fex bar baz bac \
+ bat bas bus box bax bux bix bex boo biz buz buc tax \
+ tex tix tiz toz tez tuz tux dex dix diz dox
+ }
+
+ : all
+ :
+ {
+ $clone_cfg;
+
+ $* ?libfoo/0.1.0 ?libbar/0.1.0 ?libbaz/0.1.0 ?libbox/0.1.0 \
+ ?libbiz/0.1.0 foo/0.1.0 fox/0.1.0 fux/0.1.0 fix/0.1.0 fex/0.1.0 \
+ bar/0.1.0 baz/0.1.0 bac/1.0.0 bat/1.0.0 bas/1.0.0 bus/0.1.0 \
+ box/0.1.0 bax/1.0.0 bux/1.0.0 bix/1.0.0 bex/1.0.0 boo/1.0.0 \
+ biz/0.1.0 buz/1.0.0 buc/1.0.0 tax/1.0.0 tex/0.1.0 tix/0.1.0 \
+ tiz/1.0.0 toz/0.1.0 tez/1.0.0 tuz/1.0.0 tux/1.0.0 dex/1.0.0 \
+ dix/1.0.0 diz/1.0.0 dox/1.0.0 2>!;
+
+ $pkg_status -r >>EOO;
+ !bat configured !1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ !biz configured !0.1.0 available 1.0.0
+ libbiz configured !0.1.0 available 1.0.0
+ !tix configured !0.1.0 available 1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !tuz configured !1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !box configured !0.1.0 available 1.0.0 0.2.0
+ libbox configured !0.1.0 available 1.0.0
+ !tux configured !1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !tix configured !0.1.0 available 1.0.0
+ !fox configured !0.1.0 available 1.0.0 0.2.0
+ libbar configured !0.1.0 available 1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bex configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !boo configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !buz configured !1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !tez configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !toz configured !0.1.0 available 1.0.0 0.2.0
+ !bix configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ !fex configured !0.1.0 available 1.0.0
+ !fux configured !0.1.0 available 1.0.0 0.2.0 0.1.1
+ libfoo configured !0.1.0 available 1.0.0
+ !baz configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bac configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbaz configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !fix configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bus configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !bas configured !1.0.0
+ !bus configured !0.1.0 available 1.0.0
+ !foo configured !0.1.0 available 1.0.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !bax configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !buc configured !1.0.0
+ !bux configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tax configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tex configured !0.1.0 available 1.0.0 0.3.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !tiz configured !1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ !tex configured !0.1.0 available 1.0.0 0.3.0 0.2.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ !dix configured !1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ !diz configured !1.0.0
+ !dox configured !1.0.0
+ !dex configured !1.0.0
+ !bar configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libfoo configured !0.1.0 available 1.0.0
+ libbar configured !0.1.0 available 1.0.0
+ libbox configured !0.1.0 available 1.0.0
+ EOO
+
+ $* --upgrade --immediate 2>&1 | $warn_to_info 2>!;
+
+ $pkg_status -r >>EOO;
+ !bat configured 1.0.0
+ libbaz configured !1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !box configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tux configured 1.0.0
+ libbox configured !1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !fox configured 1.0.0
+ libfoo configured !1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ !bex configured 1.0.0
+ libbar configured !1.0.0
+ !boo configured 1.0.0
+ libbar configured !1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ !tez configured 1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ libbar configured !1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !fux configured 1.0.0
+ libfoo configured !1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ !baz configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !bac configured 1.0.0
+ libbar configured !1.0.0
+ libbaz configured !1.0.0
+ libfoo configured !1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured !1.0.0
+ libbaz configured !1.0.0
+ libbar configured !1.0.0
+ !bax configured 1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ libfoo configured !1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tax configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tex configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !tiz configured 1.0.0
+ libbar configured !1.0.0
+ !tex configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libbar configured !1.0.0
+ libbox configured !1.0.0
+ EOO
+
+ $pkg_drop foo fox fux fix fex bar baz bac bat bas bus box bax bux \
+ bix bex boo biz buz buc tax tex tix tiz toz tez tuz tux \
+ dex dix diz dox
+ }
+ }
+ }
+
+ : incrementally
+ :
+ {
+ +$clone_cfg
+
+ : all
+ :
+ {
+ $clone_cfg;
+
+ ps = libfoo libbar libbaz libbox libbiz foo fox fux fix fex bar baz \
+ bac bat bas bus box bax bux bix bex boo biz buz buc tax tex \
+ tix tiz toz tez tuz tux dex dix diz dox;
+
+ for p: $ps
+ $* $p 2>&1 | $warn_to_info 2>!
+ end;
+
+ $pkg_status -r >>EOO;
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ !libbaz configured 1.0.0
+ !libbox configured 1.0.0
+ !libbiz configured 1.0.0
+ !libbar configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !fox configured 1.0.0
+ !libfoo configured 1.0.0
+ !fux configured 1.0.0
+ !libfoo configured 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !baz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !bac configured 1.0.0
+ !libbar configured 1.0.0
+ !libbaz configured 1.0.0
+ !libfoo configured 1.0.0
+ !bat configured 1.0.0
+ !libbaz configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbaz configured 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ !box configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !bax configured 1.0.0
+ !libbar configured 1.0.0
+ !libbox configured 1.0.0
+ !libfoo configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libbar configured 1.0.0
+ !bex configured 1.0.0
+ !libbar configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ !libbar configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tax configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !tiz configured 1.0.0
+ !libbar configured 1.0.0
+ !tex configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tez configured 1.0.0
+ !libbar configured 1.0.0
+ !libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ !libbox configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ !libbox configured 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libbar configured 1.0.0
+ !libbox configured 1.0.0
+ EOO
+
+ # Drop the configured packages by sequentially turning all of them
+ # into dependencies.
+ #
+ for p: $ps
+ $* ?$p 2>!
+ end
+ }
+
+ : applications
+ :
+ {
+ +$clone_cfg
+
+ : direct-order
+ :
+ {
+ $clone_cfg;
+
+ ps = foo fox fux fix fex bar baz bac bat bas bus \
+ box bax bux bix bex boo biz buz buc tax tex \
+ tix tiz toz tez tuz tux dex dix diz dox;
+
+ for p: $ps
+ $* $p 2>&1 | $warn_to_info 2>!
+ end;
+
+ $pkg_status -r >>EOO;
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured 1.0.0
+ libfoo configured 1.0.0
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libfoo configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bac configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ !box configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ libbar configured 1.0.0
+ !bex configured 1.0.0
+ libbar configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tux configured 1.0.0
+ libbox configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ EOO
+
+ # Drop the configured packages by sequentially turning all of them
+ # into dependencies.
+ #
+ for p: $ps
+ $* ?$p 2>&1 | $warn_to_info 2>!
+ end
+ }
+
+ : reverse-order
+ :
+ {
+ $clone_cfg;
+
+ ps = dox diz dix dex tux tuz tez toz tiz tix tex tax buc buz biz \
+ boo bex bix bux bax box bus bas bat bac baz bar fex fix fux \
+ fox foo;
+
+ for p: $ps
+ $* $p 2>&1 | $warn_to_info 2>!
+ end;
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !diz configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !dix configured 1.0.0
+ !dox configured 1.0.0
+ !dex configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !tux configured 1.0.0
+ libbox configured 1.0.0
+ !tix configured 0.1.0 available 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tuz configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tez configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ !toz configured 0.1.0 available 1.0.0 0.2.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tiz configured 1.0.0
+ libbar configured 1.0.0
+ !tex configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !tax configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ !buc configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !buz configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured 1.0.0
+ !biz configured 1.0.0
+ !boo configured 1.0.0
+ libbar configured 1.0.0
+ !bex configured 1.0.0
+ libbar configured 1.0.0
+ !bix configured 1.0.0
+ !bar configured 1.0.0
+ libbar configured 1.0.0
+ !bux configured 1.0.0
+ libbar configured 1.0.0
+ libbar configured 1.0.0
+ !bax configured 1.0.0
+ libbar configured 1.0.0
+ libbox configured 1.0.0
+ libfoo configured 1.0.0
+ !box configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ !bas configured 1.0.0
+ !bus configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libbaz configured 1.0.0
+ libbar configured 1.0.0
+ !bat configured 1.0.0
+ libbaz configured 1.0.0
+ !bac configured 1.0.0
+ libbar configured 1.0.0
+ libbaz configured 1.0.0
+ libfoo configured 1.0.0
+ !baz configured 1.0.0
+ libbar configured 1.0.0
+ libfoo configured 1.0.0
+ !fex configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ libfoo configured 1.0.0
+ !fix configured 1.0.0
+ !foo configured 1.0.0
+ libfoo configured 1.0.0
+ !fux configured 1.0.0
+ libfoo configured 1.0.0
+ !fox configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ # Drop the configured packages by sequentially turning all of them
+ # into dependencies.
+ #
+ for p: $ps
+ $* ?$p 2>!
+ end
+ }
+
+ : reordered
+ :
+ : Vary the order of package builds. Note that the first order is the
+ : original direct package order and the last one is the reverse
+ : order.
+ :
+ if $all
+ {
+ $clone_cfg;
+
+ ds = foo fox fux fix fex bar baz bac bat bas bus box bax bux bix \
+ bex boo biz buz buc tax tex tix tiz toz tez tuz tux dex dix \
+ diz dox tvz '';
+
+ # Prepare the package build list.
+ #
+ for i: $integer.integer_sequence(1, $name.size($ds))
+ d = ($ds[$i])
+ ps =
+
+ prepend = true
+ for p: $ds
+ if ($p == $d)
+ prepend = false
+ end
+
+ if ($p != '')
+ if $prepend
+ ps =+ $p
+ else
+ ps += $p
+ end
+ end
+ end
+
+ echo $ps >&2 2>|
+
+ # Build the packages, one at a time, creating the list of
+ # successfully built packages.
+ #
+ # Note that a package build may potentially fail due to some
+ # ambiguity which requires user's additional input. We just
+ # silently ignore such failures.
+ #
+ cps =
+ for p: $ps
+ timeout 120
+ if $* $p 2>&1 | $warn_to_info 2>!
+ cps += $p
+ end
+ end
+
+ # Drop the configured packages by sequentially turning all of
+ # them into dependencies.
+ #
+ for p: $cps
+ timeout 120
+ $* ?$p 2>!
+ end
+ end
+ }
+ }
+ }
+ }
+ }
+
+ : config-negotiation
+ :
+ {
+ test.arguments += --yes --plan 'build plan:'
+
+ : proj-better-choice
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13a && $rep_fetch
+
+ : bar-baz-biz
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.backend=cli (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = cli
+ EOO
+
+ # Make sure that since baz doesn't reconfigure liba (it also accepts
+ # the cli backend for liba) the re-evaluated existing dependents bar
+ # and the liba dependency doesn't get reconfigured.
+ #
+ $* baz 2>>~%EOE%;
+ build plan:
+ new baz/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = cli
+ EOO
+
+ # Make sure that when biz reconfigures liba (it only accepts the gui
+ # backend for liba), the re-evaluated existing dependents bar and baz
+ # are also reconfigured.
+ #
+ $* biz 2>>~%EOE%;
+ build plan:
+ reconfigure/update liba/1.0.0 (required by bar, baz, biz)
+ config.liba.backend=gui (set by biz)
+ reconfigure baz/1.0.0 (dependent of liba)
+ reconfigure bar/1.0.0 (dependent of liba)
+ new biz/1.0.0
+ disfigured bar/1.0.0
+ disfigured baz/1.0.0
+ disfigured liba/1.0.0
+ fetched biz/1.0.0
+ unpacked biz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ configured bar/1.0.0
+ configured biz/1.0.0
+ %info: .+biz.+ is up to date%
+ %info: .+baz.+ is up to date%
+ %info: .+bar.+ is up to date%
+ updated biz/1.0.0
+ updated baz/1.0.0
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ !biz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = gui
+ EOO
+
+ $pkg_drop bar baz biz
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.backend=gui (set by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = gui
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : bar-baz
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar, baz)
+ config.liba.backend=cli (set by bar)
+ new bar/1.0.0
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ %info: .+bar.+ is up to date%
+ %info: .+baz.+ is up to date%
+ updated bar/1.0.0
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = cli
+ EOO
+
+ $pkg_drop bar baz
+ }
+
+ : baz-bar
+ :
+ {
+ $clone_cfg;
+
+ $* baz bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar, baz)
+ config.liba.backend=cli (set by bar)
+ new baz/1.0.0
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ configured bar/1.0.0
+ %info: .+baz.+ is up to date%
+ %info: .+bar.+ is up to date%
+ updated baz/1.0.0
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ # Would have been cool to end up with gui but looks like we have a map
+ # in configuration cluster.
+ #
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = cli
+ EOO
+
+ $pkg_drop baz bar
+ }
+
+ : bar-baz-biz-box
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz biz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar, baz, biz)
+ config.liba.backend=gui (set by biz)
+ new bar/1.0.0
+ new baz/1.0.0
+ new biz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ fetched biz/1.0.0
+ unpacked biz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ configured biz/1.0.0
+ %info: .+bar.+ is up to date%
+ %info: .+baz.+ is up to date%
+ %info: .+biz.+ is up to date%
+ updated bar/1.0.0
+ updated baz/1.0.0
+ updated biz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ !biz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = gui
+ EOO
+
+ # Make sure that since box doesn't reconfigure liba (it accepts any
+ # backend for liba and just reflects it in its own configuration) none
+ # of the re-evaluated existing dependents nor the liba dependency get
+ # reconfigured.
+ #
+ $* box 2>>~%EOE%;
+ build plan:
+ new box/1.0.0
+ config.box.liba_backend=gui (set by box)
+ fetched box/1.0.0
+ unpacked box/1.0.0
+ configured box/1.0.0
+ %info: .+box.+ is up to date%
+ updated box/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ !biz configured 1.0.0
+ liba configured 1.0.0
+ !box configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.backend = gui
+ EOO
+
+ cat cfg/box-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.box.liba_backend = gui
+ EOO
+
+ $pkg_drop bar baz biz box
+ }
+ }
+
+ : proj-better-value
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13b && $rep_fetch
+
+ : liba
+ :
+ {
+ $clone_cfg;
+
+ $* liba 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ configured liba/1.0.0
+ %info: .+liba.+ is up to date%
+ updated liba/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ config.liba.buffer = 1024
+ EOO
+
+ $pkg_drop liba
+ }
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.buffer=2048 (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ config.liba.buffer = 2048
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : bar-liba
+ :
+ {
+ $clone_cfg;
+
+ $* bar ?liba +{ config.liba.x=true } 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=true (user configuration)
+ config.liba.buffer=10240 (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 10240
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.buffer=4096 (set by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ config.liba.buffer = 4096
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : baz-liba
+ :
+ {
+ $clone_cfg;
+
+ $* baz ?liba +{ config.liba.x=true } 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.x=true (user configuration)
+ config.liba.buffer=10240 (set by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 10240
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : baz-biz
+ :
+ {
+ $clone_cfg;
+
+ $* baz biz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz, biz)
+ %(
+ config.liba.buffer=10240 (set by baz)
+ config.liba.x=true (set by biz)
+ %|
+ config.liba.x=true (set by biz)
+ config.liba.buffer=10240 (set by baz)
+ %)
+ new baz/1.0.0
+ new biz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ fetched biz/1.0.0
+ unpacked biz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ configured biz/1.0.0
+ %info: .+baz.+ is up to date%
+ %info: .+biz.+ is up to date%
+ updated baz/1.0.0
+ updated biz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ !biz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 10240
+ EOO
+
+ $pkg_drop baz biz
+ }
+ }
+
+ : proj-disable-unused
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13c && $rep_fetch
+
+ : liba
+ :
+ {
+ $clone_cfg;
+
+ $* liba 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ configured liba/1.0.0
+ %info: .+liba.+ is up to date%
+ updated liba/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop liba
+ }
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ <depends-reflect-clause>:1:5: info: false
+ info: reflect clause:
+ info $config.liba.x
+ info: in depends manifest value of package bar
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=false (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : bar-baz
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz 2>>~%EOE%;
+ <depends-reflect-clause>:1:5: info: true
+ info: reflect clause:
+ info $config.liba.x
+ info: in depends manifest value of package bar
+ build plan:
+ new liba/1.0.0 (required by bar, baz)
+ config.liba.x=true (set by baz)
+ new bar/1.0.0
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ %info: .+bar.+ is up to date%
+ %info: .+baz.+ is up to date%
+ updated bar/1.0.0
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop bar baz
+ }
+
+ : bar-liba
+ :
+ {
+ $clone_cfg;
+
+ $* bar ?liba +{ config.liba.x=true } 2>>~%EOE%;
+ <depends-reflect-clause>:1:5: info: true
+ info: reflect clause:
+ info $config.liba.x
+ info: in depends manifest value of package bar
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=true (user configuration)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop bar
+ }
+ }
+
+ : proj-use-if-available
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13d && $rep_fetch
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar, baz)
+ config.liba.x=true (set by baz)
+ new bar/1.0.0 (required by baz)
+ config.bar.liba_x=true (set by bar)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ bar configured 1.0.0
+ liba configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new libb/1.0.0 (required by bar)
+ new bar/1.0.0
+ config.bar.liba_x=false (set by bar)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured libb/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ EOO
+
+ $pkg_drop bar
+ }
+ }
+
+ : proj-bogus-config
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13e && $rep_fetch
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz, biz)
+ config.liba.x=true (set by biz)
+ new biz/1.0.0 (required by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched biz/1.0.0
+ unpacked biz/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured biz/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ biz configured 1.0.0
+ liba configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 1024
+ EOO
+
+ $pkg_drop baz
+ }
+ }
+
+ : proj-bogus-config-cycle
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13f && $rep_fetch
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE% != 0;
+ error: unable to remove bogus configuration values without causing configuration refinement cycle
+ info: consider manually specifying one or more of the following variables as user configuration
+ %(
+ config.liba.buffer=2048
+ config.liba.x=true
+ %|
+ config.liba.x=true
+ config.liba.buffer=2048
+ %)
+ EOE
+
+ $* baz ?liba +{ config.liba.x=true } 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.x=true (user configuration)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 1024
+ EOO
+
+ $pkg_drop baz
+ }
+ }
+
+ : proj-detect-unset
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13g && $rep_fetch
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>EOE != 0
+ <depends-accept-clause>:1:3: error: undefined dependency configuration variable config.liba.x
+ info: was config.liba.x set in earlier prefer or require clause?
+ info: accept condition: ($config.liba.x)
+ info: in depends manifest value of package bar
+ EOE
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>EOE != 0
+ <depends-enable-clause>:1:3: error: undefined dependency configuration variable config.liba.x
+ info: was config.liba.x set in earlier prefer or require clause?
+ info: enable condition: ($config.liba.x)
+ info: in depends manifest value of package baz
+ info: while satisfying baz/1.0.0
+ EOE
+ }
+
+ : biz
+ :
+ {
+ $clone_cfg;
+
+ $* biz 2>>EOE != 0
+ <depends-prefer-clause>:1:12: error: undefined dependency configuration variable config.liba.x
+ info: was config.liba.x set in earlier prefer or require clause?
+ info: prefer clause:
+ x = (!$config.liba.x) # Error.
+ info: in depends manifest value of package biz
+ EOE
+ }
+
+ : box
+ :
+ {
+ $clone_cfg;
+
+ $* box 2>>EOE != 0
+ <depends-reflect-clause>:1:12: error: undefined dependency configuration variable config.liba.x
+ info: was config.liba.x set in earlier prefer or require clause?
+ info: reflect clause:
+ x = (!$config.liba.x) # Error.
+ info: in depends manifest value of package box
+ EOE
+ }
+ }
+
+ : proj-cycle
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13h && $rep_fetch
+
+ : bar baz
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz 2>>EOE != 0
+ error: unable to negotiate acceptable configuration between dependents baz, bar for dependencies liba
+ info: configuration before negotiation:
+ config.liba.buffer=4096 (set by bar)
+ info: configuration after negotiation:
+ config.liba.buffer=8192 (set by baz)
+ EOE
+ }
+ }
+
+ : proj-dependency-reflect
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13i && $rep_fetch
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ <depends-reflect-clause>:1:5: info: yes
+ info: reflect clause:
+ info ($config.liba.x ? yes : no)
+ info: in depends manifest value of package bar
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=true (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 1024
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ <depends-reflect-clause>:1:5: info: 4096
+ info: reflect clause:
+ info $config.liba.buffer
+ info: in depends manifest value of package baz
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.buffer=4096 (set by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = false
+ config.liba.buffer = 4096
+ EOO
+
+ $pkg_drop baz
+ }
+ }
+
+ : proj-non-sensible
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13j && $rep_fetch
+
+ : bar-baz
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz 2>>/~%EOE% != 0
+ error: unable to negotiate sensible configuration for dependency liba
+ % .+/root.build:5:1: error: buffer must be at least 4096 if feature x is enabled%
+ info: negotiated configuration:
+ %(
+ config.liba.buffer=2048 (set by bar)
+ config.liba.x=true (set by baz)
+ %|
+ config.liba.x=true (set by baz)
+ config.liba.buffer=2048 (set by bar)
+ %)
+ EOE
+ }
+
+ : biz-baz
+ :
+ {
+ $clone_cfg;
+
+ $* biz baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz, biz)
+ %(
+ config.liba.buffer=4096 (set by biz)
+ config.liba.x=true (set by baz)
+ %|
+ config.liba.x=true (set by baz)
+ config.liba.buffer=4096 (set by biz)
+ %)
+ new biz/1.0.0
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched biz/1.0.0
+ unpacked biz/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured biz/1.0.0
+ configured baz/1.0.0
+ %info: .+biz.+ is up to date%
+ %info: .+baz.+ is up to date%
+ updated biz/1.0.0
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !biz configured 1.0.0
+ liba configured 1.0.0
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.buffer = 4096
+ EOO
+
+ $pkg_drop biz baz
+ }
+ }
+
+ : proj-unaccept
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13k && $rep_fetch
+
+ : bar-baz
+ :
+ {
+ $clone_cfg;
+
+ $* bar baz 2>>EOE != 0
+ error: unable to negotiate acceptable configuration with dependent bar for dependencies liba
+ info: configuration before negotiation:
+ config.liba.buffer=8192 (set by baz)
+ EOE
+ }
+ }
+
+ : proj-require-system
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13l && $rep_fetch
+
+ : bar
+ :
+ {
+ +$clone_cfg
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=true (set by bar)
+ new libb/1.0.0 (required by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured libb/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : sys-liba-1
+ :
+ {
+ $clone_cfg;
+
+ $* bar '?sys:liba' 2>>~%EOE%;
+ build plan:
+ configure sys:liba/* (required by bar)
+ config.liba.x=true (expected by bar)
+ new libb/1.0.0 (required by bar)
+ new bar/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured sys:liba/*
+ configured libb/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured,system !* available 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : sys-liba-2
+ :
+ {
+ $clone_cfg;
+
+ $* bar '?sys:liba' +{ config.liba.x=false } 2>>EOE != 0
+ error: unable to negotiate acceptable configuration with dependent bar for dependencies liba
+ info: configuration before negotiation:
+ config.liba.x=false (user configuration)
+ EOE
+ }
+
+ : bar-sys-liba-3
+ :
+ {
+ $clone_cfg;
+
+ $* bar '?sys:liba' +{ config.liba.x=[null] } 2>>EOE != 0
+ error: unable to negotiate acceptable configuration with dependent bar for dependencies liba
+ info: configuration before negotiation:
+ config.liba.x=[null] (user configuration)
+ EOE
+ }
+
+ : sys-liba-4
+ :
+ {
+ $clone_cfg;
+
+ $* bar '?sys:liba' +{ config.liba.x=true } 2>>~%EOE%;
+ build plan:
+ configure sys:liba/* (required by bar)
+ config.liba.x=true (expected user configuration)
+ new libb/1.0.0 (required by bar)
+ new bar/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured sys:liba/*
+ configured libb/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured,system !* available 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ $pkg_drop bar
+ }
+ }
+
+ : baz
+ :
+ {
+ +$clone_cfg
+
+ : basic
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by baz)
+ config.liba.x=true (set by baz)
+ new libb/1.0.0 (required by baz)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured libb/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : sys-liba-1
+ :
+ {
+ $clone_cfg;
+
+ $* baz '?sys:liba' +{ config.liba.x=false } 2>>EOE != 0
+ error: unable to negotiate configuration for system dependency liba without configuration information
+ info: consider specifying system dependency version that has corresponding available package
+ info: dependent baz has prefer/accept clauses that cannot be evaluated without configuration information
+ EOE
+ }
+
+ : sys-liba-2
+ :
+ {
+ $clone_cfg;
+
+ $* baz '?sys:liba/1.0.0' 2>>~%EOE%;
+ build plan:
+ configure sys:liba/1.0.0 (required by baz)
+ config.liba.x=true (expected by baz)
+ new libb/1.0.0 (required by baz)
+ new baz/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured sys:liba/1.0.0
+ configured libb/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured,system !1.0.0
+ libb configured 1.0.0
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : sys-liba-3
+ :
+ {
+ $clone_cfg;
+
+ $* baz '?sys:liba/1.0.0' +{ config.liba.x=false } 2>>EOE != 0
+ error: unable to negotiate acceptable configuration with dependent baz for dependencies liba
+ info: configuration before negotiation:
+ config.liba.x=false (user configuration)
+ EOE
+ }
+
+ : bar-sys-liba-4
+ :
+ {
+ $clone_cfg;
+
+ $* baz '?sys:liba/1.0.0' +{ config.liba.x=[null] } 2>>EOE != 0
+ <depends-accept-clause>:1: error: invalid bool value: null
+ info: accept condition: ($config.liba.x)
+ info: in depends manifest value of package baz
+ EOE
+ }
+
+ : sys-liba-5
+ :
+ {
+ $clone_cfg;
+
+ $* baz '?sys:liba/1.0.0' +{ config.liba.x=true } 2>>~%EOE%;
+ build plan:
+ configure sys:liba/1.0.0 (required by baz)
+ config.liba.x=true (expected user configuration)
+ new libb/1.0.0 (required by baz)
+ new baz/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured sys:liba/1.0.0
+ configured libb/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ liba configured,system !1.0.0
+ libb configured 1.0.0
+ EOO
+
+ $pkg_drop baz
+ }
+ }
+ }
+
+ : proj-require-basics
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13m && $rep_fetch
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ config.liba.x=true (set by bar)
+ new bar/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.y = false
+ config.liba.n = 1024
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar, baz)
+ %(
+ config.liba.y=true (set by baz)
+ config.liba.x=true (set by bar)
+ %|
+ config.liba.x=true (set by bar)
+ config.liba.y=true (set by baz)
+ %)
+ new bar/1.0.0 (required by baz)
+ new baz/1.0.0
+ config.baz.bar=true (set by baz)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ bar configured 1.0.0
+ liba configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/liba-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.liba.x = true
+ config.liba.y = true
+ config.liba.n = 1024
+ EOO
+
+ cat cfg/baz-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.baz.bar = true
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : biz
+ :
+ {
+ $clone_cfg;
+
+ $* biz 2>>EOE != 0
+ error: configuration variable config.liba.x is not set to true
+ info: config.liba.x set in require clause of dependent biz
+ info: require clause:
+ config.liba.x = false # Error: not true
+ info: in depends manifest value of package biz
+ EOE
+ }
+
+ : bix
+ :
+ {
+ $clone_cfg;
+
+ $* bix 2>>EOE != 0
+ error: configuration variable config.liba.n is not of bool type
+ info: config.liba.n set in require clause of dependent bix
+ info: require clause:
+ config.liba.n = 1 # Error: not bool
+ info: in depends manifest value of package bix
+ EOE
+ }
+
+ : box
+ :
+ {
+ $clone_cfg;
+
+ $* box 2>>EOE != 0
+ error: package liba has no configuration variable config.liba.z
+ info: config.liba.z set in require clause of dependent box
+ info: require clause:
+ config.liba.z = true # Error: no such variable
+ info: in depends manifest value of package box
+ EOE
+ }
+ }
+
+ : proj-reflect-append
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13n && $rep_fetch
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new libb/1.0.0 (required by bar)
+ new bar/1.0.0
+ config.bar.libs=liba libb (set by bar)
+ config.bar.x=true (set by bar)
+ config.bar.y=true (set by bar)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched libb/1.0.0
+ unpacked libb/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured libb/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ libb configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.libs = liba libb
+ EOO
+
+ $pkg_drop bar
+ }
+ }
+
+ : proj-reflect-override
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t13o && $rep_fetch
+
+ : bar
+ :
+ {
+ $clone_cfg;
+
+ $* bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0
+ config.bar.x=true (set by bar)
+ config.bar.y=true (set by bar)
+ config.bar.z=true (set by bar)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = true
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : bar-config-1
+ :
+ {
+ $clone_cfg;
+
+ $* config.bar.y=true -- bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0
+ config.bar.y=true (user configuration)
+ config.bar.x=true (set by bar)
+ config.bar.z=true (set by bar)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = true
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : bar-config-2
+ :
+ {
+ $clone_cfg;
+
+ $* config.bar.y=false -- bar 2>>EOE != 0
+ error: reflect variable config.bar.y overriden by user configuration
+ info: reflect value: config.bar.y=true
+ info: user value: config.bar.y=false
+ info: reflect clause:
+ config.bar.x = true
+ config.bar.y = true
+ if ($config.origin(config.bar.z) != 'override')
+ config.bar.z = true
+ info: in depends manifest value of package bar
+ info: while satisfying bar/1.0.0
+ EOE
+ }
+
+ : bar-config-3
+ :
+ {
+ $clone_cfg;
+
+ $* config.bar.z=false -- bar 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0
+ config.bar.z=false (user configuration)
+ config.bar.x=true (set by bar)
+ config.bar.y=true (set by bar)
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ %info: .+bar.+ is up to date%
+ updated bar/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = false
+ EOO
+
+ $pkg_drop bar
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+
+ $* baz 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0 (required by baz)
+ config.bar.y=true (set by baz)
+ config.bar.x=true (set by bar)
+ config.bar.z=true (set by bar)
+ new baz/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched baz/1.0.0
+ unpacked baz/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured baz/1.0.0
+ %info: .+baz.+ is up to date%
+ updated baz/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !baz configured 1.0.0
+ bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = true
+ EOO
+
+ $pkg_drop baz
+ }
+
+ : biz
+ :
+ {
+ $clone_cfg;
+
+ $* biz 2>>EOE != 0
+ error: reflect variable config.bar.y overriden by dependent biz
+ info: reflect value: config.bar.y=true
+ info: dependent value: config.bar.y=false
+ info: reflect clause:
+ config.bar.x = true
+ config.bar.y = true
+ if ($config.origin(config.bar.z) != 'override')
+ config.bar.z = true
+ info: in depends manifest value of package bar
+ info: while satisfying bar/1.0.0
+ EOE
+ }
+
+ : biz-bar
+ :
+ {
+ $clone_cfg;
+
+ $* biz ?bar +{ config.bar.y=false } 2>>EOE != 0
+ error: reflect variable config.bar.y overriden by user configuration
+ info: reflect value: config.bar.y=true
+ info: user value: config.bar.y=false
+ info: reflect clause:
+ config.bar.x = true
+ config.bar.y = true
+ if ($config.origin(config.bar.z) != 'override')
+ config.bar.z = true
+ info: in depends manifest value of package bar
+ info: while satisfying bar/1.0.0
+ EOE
+ }
+
+ : bix
+ :
+ {
+ $clone_cfg;
+
+ $* bix 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0 (required by bix)
+ config.bar.z=false (set by bix)
+ config.bar.x=true (set by bar)
+ config.bar.y=true (set by bar)
+ new bix/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched bix/1.0.0
+ unpacked bix/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured bix/1.0.0
+ %info: .+bix.+ is up to date%
+ updated bix/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bix configured 1.0.0
+ bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = false
+ EOO
+
+ $pkg_drop bix
+ }
+
+ : bix-bar
+ :
+ {
+ $clone_cfg;
+
+ $* bix ?bar +{ config.bar.z=false } 2>>~%EOE%;
+ build plan:
+ new liba/1.0.0 (required by bar)
+ new bar/1.0.0 (required by bix)
+ config.bar.z=false (user configuration)
+ config.bar.x=true (set by bar)
+ config.bar.y=true (set by bar)
+ new bix/1.0.0
+ fetched liba/1.0.0
+ unpacked liba/1.0.0
+ fetched bar/1.0.0
+ unpacked bar/1.0.0
+ fetched bix/1.0.0
+ unpacked bix/1.0.0
+ configured liba/1.0.0
+ configured bar/1.0.0
+ configured bix/1.0.0
+ %info: .+bix.+ is up to date%
+ updated bix/1.0.0
+ EOE
+
+ $pkg_status -r >>EOO;
+ !bix configured 1.0.0
+ bar configured 1.0.0
+ liba configured 1.0.0
+ EOO
+
+ cat cfg/bar-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.bar.x = true
+ config.bar.y = true
+ config.bar.z = false
+ EOO
+
+ $pkg_drop bix
+ }
+ }
+ }
+}
+
+: test-dependency
+:
+{
+ +$clone_cfg
+ +$rep_add $rep/t10 && $rep_fetch
+
+ : simultaneously
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo-bar libfoo-tests 2>>~%EOE%;
+ fetched libfoo-bar/1.0.0
+ unpacked libfoo-bar/1.0.0
+ fetched libfoo-tests/1.0.0
+ unpacked libfoo-tests/1.0.0
+ configured libfoo-bar/1.0.0
+ configured libfoo-tests/1.0.0
+ %info: .+libfoo-bar-1.0.0.+ is up to date%
+ %info: .+libfoo-tests-1.0.0.+ is up to date%
+ updated libfoo-bar/1.0.0
+ updated libfoo-tests/1.0.0
+ EOE
+
+ cat cfg/libfoo-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo_tests.test = libfoo-bar
+ %.*
+ EOO
+
+ $pkg_drop libfoo-bar libfoo-tests
+ }
+
+ : sequentially
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo-baz 2>>~%EOE%;
+ fetched libfoo-baz/1.0.0
+ unpacked libfoo-baz/1.0.0
+ configured libfoo-baz/1.0.0
+ %info: .+libfoo-baz-1.0.0.+ is up to date%
+ updated libfoo-baz/1.0.0
+ EOE
+
+ $* libfoo-tests 2>>~%EOE%;
+ fetched libfoo-tests/1.0.0
+ unpacked libfoo-tests/1.0.0
+ configured libfoo-tests/1.0.0
+ %info: .+libfoo-tests-1.0.0.+ is up to date%
+ updated libfoo-tests/1.0.0
+ EOE
+
+ cat cfg/libfoo-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libfoo_tests.test = libfoo-baz
+ %.*
+ EOO
+
+ $pkg_drop libfoo-baz libfoo-tests
+ }
+
+ : tests-only
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo-tests 2>>~%EOE% != 0
+ error: unable to select dependency alternative for package libfoo-tests/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libfoo-bar
+ info: alternative: libfoo-baz
+ info: while satisfying libfoo-tests/1.0.0
+ EOE
+ }
+
+ : reflection-foo
+ :
+ {
+ $clone_cfg;
+
+ $* libbar-tests 2>>EOE != 0;
+ error: unable to select dependency alternative for package libbar-tests/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar-baz
+ info: alternative: libbar-foo
+ info: while satisfying libbar-tests/1.0.0
+ EOE
+
+ $* libbar-foo libbar-tests 2>>~%EOE%;
+ fetched libbar-foo/1.0.0
+ unpacked libbar-foo/1.0.0
+ fetched libbar-tests/1.0.0
+ unpacked libbar-tests/1.0.0
+ configured libbar-foo/1.0.0
+ configured libbar-tests/1.0.0
+ %info: .+libbar-foo-1.0.0.+ is up to date%
+ %info: .+libbar-tests-1.0.0.+ is up to date%
+ updated libbar-foo/1.0.0
+ updated libbar-tests/1.0.0
+ EOE
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-foo
+ %.*
+ EOO
+
+ $* libbar-baz 2>>~%EOE%;
+ fetched libbar-baz/1.0.0
+ unpacked libbar-baz/1.0.0
+ configured libbar-baz/1.0.0
+ %info: .+libbar-baz-1.0.0.+ is up to date%
+ updated libbar-baz/1.0.0
+ EOE
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-foo
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !libbar-foo configured 1.0.0
+ !libbar-tests configured 1.0.0
+ !libbar-foo configured 1.0.0
+ !libbar-baz configured 1.0.0
+ EOO
+
+ $pkg_drop libbar-foo libbar-baz libbar-tests
+ }
+
+ : reflection-baz
+ :
+ {
+ $clone_cfg;
+
+ $* libbar-baz libbar-tests 2>>~%EOE%;
+ fetched libbar-baz/1.0.0
+ unpacked libbar-baz/1.0.0
+ fetched libbar-tests/1.0.0
+ unpacked libbar-tests/1.0.0
+ configured libbar-baz/1.0.0
+ configured libbar-tests/1.0.0
+ %info: .+libbar-baz-1.0.0.+ is up to date%
+ %info: .+libbar-tests-1.0.0.+ is up to date%
+ updated libbar-baz/1.0.0
+ updated libbar-tests/1.0.0
+ EOE
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-baz
+ %.*
+ EOO
+
+ $* libbar-foo 2>>~%EOE%;
+ fetched libbar-foo/1.0.0
+ unpacked libbar-foo/1.0.0
+ configured libbar-foo/1.0.0
+ %info: .+libbar-foo-1.0.0.+ is up to date%
+ updated libbar-foo/1.0.0
+ EOE
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-baz
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !libbar-baz configured 1.0.0
+ !libbar-tests configured 1.0.0
+ !libbar-baz configured 1.0.0
+ !libbar-foo configured 1.0.0
+ EOO
+
+ $pkg_drop libbar-baz libbar-foo libbar-tests
+ }
+
+ : reflection-both
+ :
+ {
+ $clone_cfg;
+
+ $* libbar-foo libbar-baz libbar-tests 2>!;
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-baz
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !libbar-foo configured 1.0.0
+ !libbar-baz configured 1.0.0
+ !libbar-tests configured 1.0.0
+ !libbar-baz configured 1.0.0
+ EOO
+
+ $pkg_drop libbar-foo libbar-baz libbar-tests
+ }
+
+ : configure-explicit
+ :
+ {
+ $clone_cfg;
+
+ $* --yes "config.libbar_tests.test = libbar-foo libbar-baz" -- libbar-tests 2>!;
+
+ cat cfg/libbar-tests-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.libbar_tests.test = libbar-foo libbar-baz
+ %.*
+ EOO
+
+ $pkg_status -r >>EOO;
+ !libbar-tests configured 1.0.0
+ libbar-baz configured 1.0.0
+ libbar-foo configured 1.0.0
+ EOO
+
+ $pkg_drop libbar-tests
+ }
}
: dependent
@@ -2853,7 +26626,7 @@ test.options += --no-progress
{
test.arguments += --yes
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
: direct
:
@@ -2872,6 +26645,27 @@ test.options += --no-progress
EOE
}
+ : direct-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ cp -r $src/libfoo-1.1.0/ libfoo;
+ echo "depends: libfoo" >+ libfoo/manifest;
+ $rep_add libfoo --type dir -d cfg2;
+
+ $rep_fetch -d cfg2;
+
+ $* libfoo +{ --config-id 1 } 2>>~%EOE% != 0
+ %error: dependency cycle detected involving package libfoo \[cfg2.\]%
+ % info: libfoo/1.1.0 \[cfg2.\] depends on libfoo/1.1.0 \[cfg2.\]%
+ EOE
+ }
+
: indirect
:
{
@@ -2950,7 +26744,7 @@ test.options += --no-progress
{
test.arguments += --yes
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+cp -r $src/libhello-1.0.0 ./libhello
+$rep_add libhello --type dir
+$rep_fetch
@@ -3092,7 +26886,7 @@ test.options += --no-progress
configured libfix/0.0.3
EOE
- clone_cfg = cp --no-cleanup -r ../cfg ./ &cfg/***
+ clone_cfg = [cmdline] cp --no-cleanup -r ../cfg ./ &cfg/***
: explicitly
:
@@ -3194,6 +26988,32 @@ test.options += --no-progress
EOE
}
+ : unavailable-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_cfg;
+
+ $* '?sys:libbaz/0.0.3' 2>>EOE;
+ disfigured libbox/0.0.1
+ disfigured libfix/0.0.3
+ disfigured libbaz/0.0.3
+ disfigured libfoo/1.0.0
+ purged libfoo/1.0.0
+ purged libbaz/0.0.3
+ configured sys:libbaz/0.0.3
+ configured libfix/0.0.3
+ configured libbox/0.0.1
+ EOE
+
+ $rep_fetch $rep/t0b;
+
+ $* --mask-repository $rep/t0c ?libbaz --patch --yes 2>>EOE != 0
+ error: patch version for sys:libbaz/0.0.3 is not available from its dependents' repositories
+ EOE
+ }
+
-$pkg_drop libbox libfix libbaz libfoo
}
@@ -3215,7 +27035,7 @@ test.options += --no-progress
configured libbaz/0.0.3
EOE
- clone_cfg = cp --no-cleanup -r ../cfg ./ &cfg/***
+ clone_cfg = [cmdline] cp --no-cleanup -r ../cfg ./ &cfg/***
: explicit
:
@@ -3426,6 +27246,30 @@ test.options += --no-progress
$pkg_drop libfoo
}
+ : no-patch-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_root_cfg;
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* "libfoo@$rep/t1" +{ --config-id 1 } --patch 2>>~%EOE%;
+ %.+
+ %configured libfoo/1.0.0 \[cfg2.\]%
+ %info: .+ is up to date%
+ %updated libfoo/1.0.0 \[cfg2.\]%
+ EOE
+
+ $* "libfoo@$rep/t3" +{ --config-id 1 } --patch 2>>~%EOE% != 0;
+ %.+
+ %error: patch version for libfoo/1.0.0 \[cfg2.\] is not found in pkg:build2.org/pkg-build/t3%
+ EOE
+
+ $pkg_drop -d cfg2 libfoo
+ }
+
: package-in-complement
:
{
@@ -3523,12 +27367,117 @@ test.options += --no-progress
$pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
$pkg_purge libfoo 2>'purged libfoo/1.1.0'
}
+
+ : preferred-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_root_cfg;
+
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $rep_fetch -d cfg2 $rep/t4a;
+ $rep_add -d cfg2 $src/libfoo-1.1.0 --type dir;
+
+ d = $canonicalize([dir_path] $src/libfoo-1.1.0);
+
+ if! $posix
+ d = [dir_path] $string.lcase($d)
+ end;
+
+ $rep_fetch -d cfg2 "dir:$d";
+
+ $* libfoo +{ --config-id 1 } 2>>~%EOE%;
+ %using libfoo/1.1.0 \[cfg2.\] \(external\)%
+ %configured libfoo/1.1.0 \[cfg2.\]%
+ %info: .+ is up to date%
+ %updated libfoo/1.1.0 \[cfg2.\]%
+ EOE
+
+ $pkg_disfigure -d cfg2 libfoo 2>'disfigured libfoo/1.1.0';
+ $pkg_purge -d cfg2 libfoo 2>'purged libfoo/1.1.0'
+ }
+}
+
+: disfigure
+:
+{
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+
+ # Build libhello as an external package.
+ #
+ +cp -r $src/libhello-1.0.0 ./libhello
+ +cat <<EOI >+libhello/build/root.build
+ config [bool] config.libhello.develop ?= false
+ config [bool] config.libhello.extras ?= false
+ if ($build.mode != 'skeleton')
+ text "develop=$config.libhello.develop"
+ EOI
+ +$rep_add libhello --type dir
+ +$rep_fetch
+ +$* config.libhello.develop=true -- libhello 2>!
+
+ : without
+ :
+ : Make sure without --disfigure old configuration is preserved.
+ :
+ {
+ $clone_cfg;
+ $* libhello 2>!; # Update after output directory change.
+
+ # Specify a configuration variable to trigger reconfiguration.
+ #
+ $* config.libhello.extras=true -- libhello 2>>~%EOE%
+ disfigured libhello/1.0.0
+ %.*: develop=true%
+ configured libhello/1.0.0
+ %.*: develop=true%
+ %(mkdir|c\+\+|ld|ar) .+%{8}
+ updated libhello/1.0.0
+ EOE
+ }
+
+ : alone
+ :
+ : Make sure --disfigure alone causes reconfiguration.
+ :
+ {
+ $clone_cfg;
+ $* libhello 2>!; # Update after output directory change.
+
+ $* --disfigure libhello 2>>~%EOE%
+ disfigured libhello/1.0.0
+ %.*: develop=false%
+ configured libhello/1.0.0
+ %.*: develop=false%
+ %(mkdir|c\+\+|ld|ar) .+%{8}
+ updated libhello/1.0.0
+ EOE
+ }
+
+ : with-keep-out
+ :
+ {
+ $clone_cfg;
+ $* libhello 2>!; # Update after output directory change.
+
+ $* --disfigure --keep-out libhello 2>>~%EOE%
+ disfigured libhello/1.0.0
+ %.*: develop=false%
+ configured libhello/1.0.0
+ %.*: develop=false%
+ %info: .+ is up to date%
+ updated libhello/1.0.0
+ EOE
+ }
}
: keep-out
:
{
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
# Build libhello as an external package.
#
@@ -3641,12 +27590,12 @@ test.options += --no-progress
: Test that libhello is fully rebuilt, as the resulted package is not
: external.
{
- clone_cfg = cp -pr --no-cleanup ../../cfg ./
+ clone_cfg = [cmdline] cp -pr --no-cleanup ../../cfg ./
# Distribute using the dedicated configuration to avoid the 'c and cxx
# module toolchain pattern mismatch' warning.
#
- +$cfg_create cxx $config_cxx -d cfg 2>- &cfg/***
+ +$cfg_create cxx $config_cxx -d cfg &cfg/***
+$build 'dist(../../libhello/@./cfg/libhello/)' \
config.dist.root=./ \
@@ -3867,6 +27816,38 @@ else
$pkg_purge style-basic
}
+ : prerequisite-repo-config
+ :
+ : As above but with a linked configuration.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add "$rep0/libbar.git#master";
+ $rep_fetch &cfg/.bpkg/repos/*/***;
+
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $rep_add -d cfg2 "$rep0/style-basic.git#master";
+ $rep_fetch -d cfg2 &cfg/.bpkg/repos/*/***;
+
+ $* libmbar style-basic +{ --config-id 1 } 2>>~%EOE%;
+ %checked out style-basic/.+ \[cfg2.\]%
+ checked out libmbar/1.0.0
+ %configured style-basic/.+ \[cfg2.\]%
+ configured libmbar/1.0.0
+ %info: .+ is up to date%{2}
+ %updated style-basic/.+ \[cfg2.\]%
+ updated libmbar/1.0.0
+ EOE
+
+ $pkg_disfigure libmbar;
+ $pkg_disfigure -d cfg2 style-basic;
+
+ $pkg_purge libmbar;
+ $pkg_purge -d cfg2 style-basic
+ }
+
: no-prerequisite-repos
:
: Dependent package repository has no prerequisites nor complements. Its
@@ -3898,7 +27879,7 @@ else
: build-unpacked
:
: Test that the unpacked external package is properly built for the first
- : time and is not rebuilt afterwards via the directory argument.
+ : time and is replaced afterwards via the directory argument.
:
if! $remote
{
@@ -3914,6 +27895,9 @@ else
EOE
$* $d 2>>~%EOE%;
+ %disfigured style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %using style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %configured style-basic/1\.1\.0-a\.0\.\d+\..+%
%info: .+ is up to date%
%updated style-basic/1\.1\.0-a\.0\.\d+\..+%
EOE
@@ -3921,27 +27905,6449 @@ else
$pkg_disfigure style-basic
}
+ : build-unpacked-config
+ :
+ : As above but with a linked configuration.
+ :
+ if! $remote
+ {
+ $clone_root_cfg;
+
+ $cfg_create -d cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ d = $canonicalize([dir_path] $out_git/state0/style-basic.git);
+ $pkg_unpack -d cfg2 -e $d;
+
+ $* style-basic +{ --config-id 1 } 2>>~%EOE%;
+ %configured style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %info: .+ is up to date%
+ %updated style-basic/1\.1\.0-a\.0\.\d+\..+%
+ EOE
+
+ $* $d +{ --config-id 1 } 2>>~%EOE%;
+ %disfigured style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %using style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %configured style-basic/1\.1\.0-a\.0\.\d+\..+%
+ %info: .+ is up to date%
+ %updated style-basic/1\.1\.0-a\.0\.\d+\..+%
+ EOE
+
+ $pkg_disfigure -d cfg2 style-basic
+ }
+
: checkout-root
:
{
$clone_root_cfg;
$rep_fetch "$rep0/libbar.git#master" &cfg/.bpkg/repos/*/***;
- $* libmbar --checkout-root $~ --checkout-purge 2>>~%EOE%;
+ # While at it, test the package checkout cache (thus build multiple
+ # packages from the same git repository).
+ #
+ $* libbar libmbar --checkout-root $~ --checkout-purge 2>>~%EOE%;
%checked out style-basic/.+%
+ checked out libbar/1.0.0+1
checked out libmbar/1.0.0
%configured style-basic/.+%
+ configured libbar/1.0.0+1
configured libmbar/1.0.0
- %info: .+ is up to date%
+ %info: .+ is up to date%{2}
+ updated libbar/1.0.0+1
updated libmbar/1.0.0
EOE
test -d libmbar-1.0.0;
- $pkg_disfigure libmbar;
- $pkg_disfigure style-basic;
+ $pkg_drop libbar libmbar
+ }
+}
- $pkg_purge libmbar;
- $pkg_purge style-basic
+: linked-configs
+:
+{
+ : 2-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ : invalid-cfg
+ :
+ {
+ +$clone_cfg
+
+ : config-id
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-id 1 2>>/EOE != 0
+ error: no configuration with id 1 is linked with cfg/
+ EOE
+ }
+
+ : config-name
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-name foo 2>>/EOE != 0
+ error: no configuration with name 'foo' is linked with cfg/
+ EOE
+ }
+
+ : config-uuid
+ :
+ {
+ $clone_cfg;
+ $* libbaz --config-uuid '18f48b4b-b5d9-4712-b98c-1930df1c4228' 2>>/EOE != 0
+ error: no configuration with uuid 18f48b4b-b5d9-4712-b98c-1930df1c4228 is linked with cfg/
+ EOE
+ }
+ }
+
+ : baz
+ :
+ {
+ $clone_cfg;
+ $cfg_create -d cfg-bar-foo &cfg-bar-foo/***;
+ $cfg_link -d cfg cfg-bar-foo;
+
+ $* libbaz ?libbar +{ --config-id 1 } ?libfoo +{ --config-id 1 } 2>>~%EOE%;
+ %fetched libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %unpacked libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %fetched libbar/1.1.0 \[cfg-bar-foo.\]%
+ %unpacked libbar/1.1.0 \[cfg-bar-foo.\]%
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ %configured libfoo/1.1.0 \[cfg-bar-foo.\]%
+ %configured libbar/1.1.0 \[cfg-bar-foo.\]%
+ configured libbaz/1.1.0
+ %info: cfg[^-].+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ libbar [cfg-bar-foo/] configured 1.1.0
+ libfoo [cfg-bar-foo/] configured 1.1.0
+ libfoo [cfg-bar-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar-foo -r libbar >>/EOO;
+ libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar-foo libfoo >'libfoo configured 1.1.0';
+
+ $pkg_drop libbaz;
+
+ $pkg_status libbaz libbar libfoo >>/EOO
+ libbaz available 1.1.0
+ libbar available [1.1.0]
+ libfoo available [1.1.0] 1.0.0
+ EOO
+ }
+ }
+
+ : 3-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ : baz
+ :
+ {
+ uuid = '28f48b4b-b5d9-4712-b98c-1930df1c4228';
+
+ $clone_cfg;
+ $cfg_create -d cfg-bar &cfg-bar/***;
+ $cfg_create -d cfg-foo --uuid $uuid &cfg-foo/***;
+
+ $cfg_link -d cfg cfg-bar;
+ $cfg_link -d cfg-bar cfg-foo;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg-bar);
+
+ $* libbar@"$rep/t4b" ?libfoo +{ --config-id 2 } --trust-yes 2>>~%EOE%;
+ added pkg:build2.org/pkg-build/t4b
+ fetching pkg:build2.org/pkg-build/t4b
+ fetching pkg:build2.org/pkg-build/t4a (prerequisite of pkg:build2.org/pkg-build/t4b)
+ %fetched libfoo/1.1.0 \[cfg-foo.\]%
+ %unpacked libfoo/1.1.0 \[cfg-foo.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libfoo/1.1.0 \[cfg-foo.\]%
+ configured libbar/1.1.0
+ %info: cfg-bar.+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ test.arguments = $regex.apply($test.arguments, cfg-bar, cfg);
+
+ $* libfoo --config-uuid $uuid 2>>~%EOE%;
+ %info: cfg-foo.+libfoo-1.1.0.+ is up to date%
+ %updated libfoo/1.1.0 \[cfg-foo.\]%
+ EOE
+
+ $* libbaz 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libbaz/1.1.0
+ %info: cfg[^-].+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status --link -r >>/EOO;
+ !libbaz configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ !libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_drop libbaz;
+
+ $* ?libfoo --config-uuid $uuid; # Unhold.
+
+ $pkg_status libbaz libbar libfoo >>/EOO;
+ libbaz available 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $* ?libbar +{ --config-id 1 } <'y' 2>>~%EOE%;
+ % drop libfoo/1.1.0 \[cfg-foo.\] \(unused\)%
+ % drop libbar/1.1.0 \[cfg-bar.\] \(unused\)%
+ %continue\? \[Y/n\] disfigured libbar/1.1.0 \[cfg-bar.\]%
+ %disfigured libfoo/1.1.0 \[cfg-foo.\]%
+ %purged libfoo/1.1.0 \[cfg-foo.\]%
+ %purged libbar/1.1.0 \[cfg-bar.\]%
+ EOE
+
+ $pkg_status libbar libfoo >>/EOO
+ libbar available [1.1.0]
+ libfoo available [1.1.0] 1.0.0
+ EOO
+ }
+ }
+
+ : buildtime-dep
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : external-config
+ :
+ {
+ +$clone_cfg
+ +$cfg_create -d cfg2 --type host --name cfg2 &cfg2/***
+ +$cfg_link -d cfg cfg2
+
+ : downgrade-dependency
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % new libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(required by foo \[cfg2.\]\)%
+ % new libbaz/1.0.0 \[cfg2.\] \(required by foo \[cfg2.\]\)%
+ % new foo/1.0.0 \[cfg2.\] \(required by libbar\)%
+ % new libbaz/1.0.0 \(required by libbar\)%
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ # While at it, make sure that from several available host
+ # configurations the selected package configuration is chosen and we
+ # don't fail with the 'multiple possible host configurations'
+ # diagnostics.
+ #
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_link -d cfg cfg3;
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % drop libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(unused\)%
+ % upgrade libbaz/1.1.0 \[cfg2.\] \(required by foo \[cfg2.\]\)%
+ % upgrade foo/1.1.0 \[cfg2.\] \(required by libbar\)%
+ upgrade libbar/1.1.0
+ %continue\? \[Y/n\] disfigured libbar/1.0.0%
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %fetched libbaz/1.1.0 \[cfg2.\]%
+ %unpacked libbaz/1.1.0 \[cfg2.\]%
+ %fetched foo/1.1.0 \[cfg2.\]%
+ %unpacked foo/1.1.0 \[cfg2.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[cfg2.\]%
+ %configured foo/1.1.0 \[cfg2.\]%
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.1.0
+ foo [cfg2/] configured 1.1.0
+ libbaz [cfg2/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+
+ $* libbar/1.0.0 ?foo/1.0.0 +{ --config-name cfg2 } \
+ ?libbaz/1.0.0 +{ --config-id 1 } <'y' &cfg2/.bpkg/build2/*** 2>>~%EOE%;
+ % new libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(required by foo \[cfg2.\]\)%
+ % downgrade libbaz/1.0.0 \[cfg2.\]%
+ % downgrade foo/1.0.0 \[cfg2.\]%
+ downgrade libbar/1.0.0
+ continue? [Y/n] disfigured libbar/1.1.0
+ %disfigured foo/1.1.0 \[cfg2.\]%
+ %disfigured libbaz/1.1.0 \[cfg2.\]%
+ %fetched libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbar/1.0.0
+ %info: cfg.+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured !1.0.0 available 1.1.0
+ foo [cfg2/] configured !1.0.0 available 1.1.0
+ libbaz [cfg2/] configured !1.0.0 available 1.1.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+
+ $pkg_drop libbar libbaz
+ }
+
+ : resolve-host-config
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_link -d cfg cfg3;
+
+ $* libbar 2>>/~%EOE% != 0;
+ error: multiple possible host configurations for build-time dependency (foo ^1.0.0)
+ info: cfg2/
+ info: cfg3/
+ info: use --config-* to select the configuration
+ %info: while satisfying libbar.1.0.0%
+ EOE
+
+ $* libbar ?foo +{ --config-id 2 } --yes 2>!;
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg3/] configured 1.0.0
+ libbaz [cfg3/] configured 1.0.0
+ libbuild2-bar [cfg3/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : 3-configs
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $cfg_create -d cfg3 --type host &cfg3/***;
+ $cfg_link -d cfg2 cfg3;
+
+ $rep_add -d cfg3 $rep/t7a && $rep_fetch -d cfg3;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* libbaz +{ --config-id 2 } 2>!;
+
+ test.arguments = $regex.apply($test.arguments, cfg2, cfg);
+
+ $* libbar --yes &cfg2/.bpkg/build2/*** 2>>~%EOE%;
+ %fetched libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ !libbaz [cfg3/] configured 1.0.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
+
+ : private-config
+ :
+ {
+ $clone_cfg;
+
+ $* libbar <'y' 2>>~%EOE% &cfg/.bpkg/host/*** &cfg/.bpkg/build2/***;
+ % new libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new libbaz/1.0.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new foo/1.0.0 \[cfg..bpkg.host.\] \(required by libbar\)%
+ % new libbaz/1.0.0 \(required by libbar\)%
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.0.0 \[cfg..bpkg.host.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.0.0 \[cfg..bpkg.host.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo [cfg/.bpkg/host/] configured 1.0.0
+ libbaz [cfg/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [cfg/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % drop libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\] \(unused\)%
+ % upgrade libbaz/1.1.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % upgrade foo/1.1.0 \[cfg..bpkg.host.\] \(required by libbar\)%
+ upgrade libbar/1.1.0
+ %continue\? \[Y/n\] disfigured libbar/1.0.0%
+ %disfigured foo/1.0.0 \[cfg..bpkg.host.\]%
+ %disfigured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %fetched libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.1.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.1.0 \[cfg..bpkg.host.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.1.0 \[cfg..bpkg.host.\]%
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.1.0
+ foo [cfg/.bpkg/host/] configured 1.1.0
+ libbaz [cfg/.bpkg/host/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : self-hosted-config
+ :
+ {
+ $cfg_create -d cfg --type host &cfg/***;
+ $rep_add $rep/t7a && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % new libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\] \(required by foo\)%
+ new libbaz/1.0.0 (required by foo, libbar)
+ new foo/1.0.0 (required by libbar)
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ configured libbaz/1.0.0
+ configured foo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.0.0
+ foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [cfg/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* libbar <'y' 2>>~%EOE%;
+ % drop libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\] \(unused\)%
+ upgrade libbaz/1.1.0 (required by foo, libbar)
+ upgrade foo/1.1.0 (required by libbar)
+ upgrade libbar/1.1.0
+ continue? [Y/n] disfigured libbar/1.0.0
+ disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ %disfigured libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched foo/1.1.0
+ unpacked foo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libbaz/1.1.0
+ configured foo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>/EOO;
+ !libbar configured 1.1.0
+ foo configured 1.1.0
+ libbaz configured 1.1.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
+
+ : verify-dependencies
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : fail
+ :
+ {
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $cfg_create -d cfg3 &cfg3/***;
+ $rep_add -d cfg3 $rep/t7a && $rep_fetch -d cfg3;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* --yes libbar 2>!;
+
+ test.arguments = $regex.apply($test.arguments, cfg2, cfg3);
+
+ $* --yes libbox 2>!;
+
+ $clone_cfg;
+ $cfg_link -d cfg cfg2;
+ $cfg_link -d cfg cfg3;
+
+ test.arguments = $regex.apply($test.arguments, cfg3, cfg);
+
+ $* libfix --yes 2>>~%EOE% != 0
+ error: package libbaz indirectly required by libfix/1.0.0 is configured in multiple configurations
+ % info: libbaz/1.0.0 \[cfg3.\]%
+ % info: libbaz/1.0.0 \[cfg2.\]%
+ EOE
+ }
+
+ : succeed
+ :
+ {
+ $cfg_create -d cfg2 &cfg2/***;
+ $rep_add -d cfg2 $rep/t7b && $rep_fetch -d cfg2;
+
+ $cfg_create -d cfg3 &cfg3/***;
+ $rep_add -d cfg3 $rep/t7b && $rep_fetch -d cfg3;
+
+ test.arguments = $regex.apply($test.arguments, cfg, cfg2);
+
+ $* --yes libbar 2>!;
+
+ test.arguments = $regex.apply($test.arguments, cfg2, cfg3);
+
+ $* --yes libbox 2>!;
+
+ $clone_cfg;
+ $cfg_link -d cfg cfg2;
+ $cfg_link -d cfg cfg3;
+
+ test.arguments = $regex.apply($test.arguments, cfg3, cfg);
+
+ $* libfix --yes 2>>~%EOE%;
+ fetched libfax/1.0.0
+ unpacked libfax/1.0.0
+ fetched libfix/1.0.0
+ unpacked libfix/1.0.0
+ configured libfax/1.0.0
+ configured libfix/1.0.0
+ %info: .+libfix-1.0.0.+ is up to date%
+ updated libfix/1.0.0
+ EOE
+
+ $pkg_drop libfix
+ }
+ }
+
+ : change-config
+ :
+ {
+ : copy
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbaz --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbaz configured 1.0.0
+ EOO
+
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ $* libbaz +{ --config-name t2 } 2>>~%EOE%;
+ %fetched libbaz/1.0.0 \[t2.\]%
+ %unpacked libbaz/1.0.0 \[t2.\]%
+ %configured libbaz/1.0.0 \[t2.\]%
+ %info: t2.+libbaz-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[t2.\]%
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libbaz configured 1.0.0
+ !libbaz [t2/] configured 1.0.0
+ EOO
+ }
+
+ : copy-unhold
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbaz --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbaz configured 1.0.0
+ EOO
+
+ $* ?libbaz +{ --config-name t2 };
+
+ $pkg_status -d t1 -r >>/EOO
+ !libbaz configured 1.0.0
+ EOO
+ }
+
+ : copy-point
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbaz --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbaz configured 1.0.0
+ EOO
+
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ $* foo libbaz +{ --config-name t2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[t1..bpkg.build2.\] \(required by foo\)%
+ % new libbaz/1.0.0 \[t2.\]%
+ new foo/1.0.0
+ %continue\? \[Y.n\] fetched libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[t2.\]%
+ %unpacked libbaz/1.0.0 \[t2.\]%
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[t2.\]%
+ configured foo/1.0.0
+ %info: t2.+libbaz-1.0.0.+ is up to date%
+ %info: t1.+foo-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[t2.\]%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libbaz configured 1.0.0
+ !foo configured 1.0.0
+ !libbaz [t2/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ !libbaz [t2/] configured 1.0.0
+ EOO
+ }
+
+ : copy-unhold-point
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbaz --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbaz configured 1.0.0
+ EOO
+
+ $* foo ?libbaz +{ --config-name t2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[t1..bpkg.build2.\] \(required by foo\)%
+ % new libbaz/1.0.0 \[t2.\] \(required by foo\)%
+ % new foo/1.0.0%
+ %continue\? \[Y.n\] fetched libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[t2.\]%
+ %unpacked libbaz/1.0.0 \[t2.\]%
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[t2.\]%
+ configured foo/1.0.0
+ %info: t1.+foo-1.0.0.+ is up to date%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d t1 -r >>/EOO
+ !libbaz configured 1.0.0
+ !foo configured 1.0.0
+ libbaz [t2/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ EOO
+ }
+
+ : copy-repoint
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d t1 h1;
+ $cfg_link -d t1 h2;
+
+ $cfg_link -d t2 h1;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbar ?foo +{ --config-name h1 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(required by foo \[h1.\]\)%
+ % new libbaz/1.0.0 \[h1.\] \(required by foo \[h1.\]\)%
+ % new foo/1.0.0 \[h1.\] \(required by libbar\)%
+ new libbaz/1.0.0 (required by libbar)
+ new libbar/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[h1.\]%
+ %unpacked libbaz/1.0.0 \[h1.\]%
+ %fetched foo/1.0.0 \[h1.\]%
+ %unpacked foo/1.0.0 \[h1.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[h1.\]%
+ %configured foo/1.0.0 \[h1.\]%
+ configured libbaz/1.0.0
+ configured libbar/1.0.0
+ %info: t1.+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo [h1/] configured 1.0.0
+ libbaz [h1/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ test.arguments = $regex.apply($test.arguments, t1, t2);
+
+ $* libbox ?foo +{ --config-name h1 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % update foo/1.0.0 \[h1.\]%
+ new libbaz/1.0.0 (required by libbox)
+ new libbox/1.0.0
+ continue? [Y/n] fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ configured libbaz/1.0.0
+ configured libbox/1.0.0
+ %info: t2.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -d t2 -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [h1/] configured 1.0.0
+ libbaz [h1/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ test.arguments = $regex.apply($test.arguments, t2, t1);
+
+ $* ?foo +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[h2..bpkg.build2.\] \(required by foo \[h2.\]\)%
+ % new libbaz/1.0.0 \[h2.\] \(required by foo \[h2.\]\)%
+ % new foo/1.0.0 \[h2.\] \(required by libbar\)%
+ % reconfigure libbar/1.0.0 \(dependent of foo \[h2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbar/1.0.0
+ %fetched libbuild2-bar/1.0.0 \[h2..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h2..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ %fetched foo/1.0.0 \[h2.\]%
+ %unpacked foo/1.0.0 \[h2.\]%
+ %configured libbuild2-bar/1.0.0 \[h2..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[h2.\]%
+ %configured foo/1.0.0 \[h2.\]%
+ configured libbar/1.0.0
+ %info: t1.+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo [h2/] configured 1.0.0
+ libbaz [h2/] configured 1.0.0
+ libbuild2-bar [h2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_status -d t2 -r >>/EOO
+ !libbox configured 1.0.0
+ foo [h1/] configured 1.0.0
+ libbaz [h1/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+ }
+
+ : copy-repoint-drop
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* foo --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ $* libbaz +{ --config-name t2 } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ drop libbaz/1.0.0 (unused)
+ % new libbaz/1.0.0 \[t2.\]%
+ % reconfigure foo/1.0.0 \(dependent of libbaz \[t2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ purged libbaz/1.0.0
+ %fetched libbaz/1.0.0 \[t2.\]%
+ %unpacked libbaz/1.0.0 \[t2.\]%
+ %configured libbaz/1.0.0 \[t2.\]%
+ configured foo/1.0.0
+ %info: t2.+libbaz-1.0.0.+ is up to date%
+ %info: t1.+foo-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[t2.\]%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !foo configured 1.0.0
+ !libbaz [t2/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ !libbaz [t2/] configured 1.0.0
+ EOO
+ }
+
+ : drop-repointed
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* foo --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ $* ?foo libbaz +{ --config-name t2 } --verbose 5 <<EOI 2>>~%EOE%;
+ y
+ EOI
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ %trace: collect_build: add libbaz/1.0.0 \[t2.\]%
+ %trace: collect_build_prerequisites: begin libbaz/1.0.0 \[t2.\]%
+ %trace: collect_build_prerequisites: end libbaz/1.0.0 \[t2.\]%
+ %.*
+ trace: collect_build_prerequisites: begin foo/1.0.0
+ %.*
+ %trace: collect_build_prerequisites: no cfg-clause for dependency libbaz/1.0.0 \[t2.\] of dependent foo/1.0.0%
+ trace: collect_build_prerequisites: end foo/1.0.0
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ trace: evaluate_dependency: libbaz/1.0.0: unused
+ %.*
+ trace: evaluate_dependency: foo/1.0.0: unused
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ %.*
+ trace: collect_drop: foo/1.0.0 package version needs to be replaced with drop
+ trace: pkg_build: collection failed due to package version replacement, retry from scratch
+ %.*
+ trace: pkg_build: refine package collection/plan execution from scratch
+ %.*
+ %trace: collect_build: add libbaz/1.0.0 \[t2.\]%
+ %trace: collect_build_prerequisites: begin libbaz/1.0.0 \[t2.\]%
+ %trace: collect_build_prerequisites: end libbaz/1.0.0 \[t2.\]%
+ trace: collect_build: apply version replacement for foo/1.0.0
+ trace: collect_build: replacement: drop
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ %trace: evaluate_dependency: libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]: unused%
+ %.*
+ trace: pkg_build: refine package collection/plan execution
+ %.*
+ trace: execute_plan: simulate: yes
+ %.*
+ % drop libbuild2-bar/1.0.0 \[t1..bpkg.build2.\] \(unused\)%
+ drop libbaz/1.0.0 (unused)
+ drop foo/1.0.0 (unused)
+ % new libbaz/1.0.0 \[t2.\]%
+ continue? [Y/n] trace: execute_plan: simulate: no
+ %.*
+ disfigured foo/1.0.0
+ %.*
+ disfigured libbaz/1.0.0
+ %.*
+ %disfigured libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %.*
+ %purged libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %.*
+ purged libbaz/1.0.0
+ %.*
+ purged foo/1.0.0
+ %.*
+ %fetched libbaz/1.0.0 \[t2.\]%
+ %.*
+ %unpacked libbaz/1.0.0 \[t2.\]%
+ %.*
+ %configured libbaz/1.0.0 \[t2.\]%
+ %.*
+ %info: .+t2.+libbaz-1.0.0.+ is up to date%
+ %.*
+ %updated libbaz/1.0.0 \[t2.\]%
+ %.*
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libbaz [t2/] configured 1.0.0
+ EOO
+ }
+
+ : dependency-repointed
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* foo --yes 2>!;
+
+ $pkg_status -d h1 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $rep_add -d h2 $rep/t7a && $rep_fetch -d h2;
+
+ $* libbar libbaz +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ drop libbaz/1.0.0 (unused)
+ % new libbaz/1.0.0 \[h2.\]%
+ reconfigure/update foo/1.0.0 (required by libbar)
+ new libbar/1.0.0
+ continue? [Y/n] disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ purged libbaz/1.0.0
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbaz/1.0.0 \[h2.\]%
+ configured foo/1.0.0
+ configured libbar/1.0.0
+ %info: h2.+libbaz-1.0.0.+ is up to date%
+ %info: h1.+libbar-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[h2.\]%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d h1 --link -r >>/EOO
+ !foo configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ !libbar configured 1.0.0
+ !foo configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ EOO
+ }
+
+ : dependency-repointed-system
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* foo --yes 2>!;
+
+ $pkg_status -d h1 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $* libbar '?sys:foo/1.2.0' +{ --config-name h1 } \
+ ?libbaz +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(unused\)%
+ drop libbaz/1.0.0 (unused)
+ reconfigure/unhold sys:foo/1.2.0
+ % new libbaz/1.0.0 \[h2.\] \(required by libbar\)%
+ new libbar/1.0.0
+ continue? [Y/n] disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ %disfigured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ purged libbaz/1.0.0
+ purged foo/1.0.0
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured sys:foo/1.2.0
+ %configured libbaz/1.0.0 \[h2.\]%
+ configured libbar/1.0.0
+ %info: h1.+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d h1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo configured,system !1.2.0
+ libbaz [h2/] configured 1.0.0
+ EOO
+
+ $* ?foo ?libbaz <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbaz/1.0.0 \[h2.\] \(unused\)%
+ % new libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(required by foo\)%
+ new libbaz/1.0.0 (required by foo, libbar)
+ downgrade foo/1.0.0
+ reconfigure libbar/1.0.0 (dependent of libbaz)
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbar/1.0.0
+ purged foo/1.2.0
+ %disfigured libbaz/1.0.0 \[h2.\]%
+ %purged libbaz/1.0.0 \[h2.\]%
+ %fetched libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ configured libbaz/1.0.0
+ configured foo/1.0.0
+ configured libbar/1.0.0
+ %info: h1.+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d h1 -r >>/EOO
+ !libbar configured 1.0.0
+ foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+ }
+
+ : orphan-repointed
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7b && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* foo --yes 2>!;
+
+ $rep_remove -d h1 $rep/t7b;
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ $rep_add -d h2 $rep/t7a && $rep_fetch -d h2;
+
+ $* libbaz +{ --config-name h2 } 2>>EOE != 0;
+ error: package foo/1.1.0 is orphaned
+ info: explicitly upgrade it to a new version
+ info: while satisfying foo/1.1.0
+ EOE
+
+ # While at it, test foo deorphaning.
+ #
+ $* foo +{ --deorphan } libbaz +{ --config-name h2 } --yes --plan "" 2>>~%EOE%;
+ drop libbaz/1.1.0 (unused)
+ % new libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\] \(required by foo\)%
+ % new libbaz/1.0.0 \[h2.\]%
+ replace/downgrade foo/1.0.0
+ disfigured foo/1.1.0
+ disfigured libbaz/1.1.0
+ purged libbaz/1.1.0
+ %fetched libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[h2.\]%
+ configured foo/1.0.0
+ %info: h2.+libbaz-1.0.0.+ is up to date%
+ %info: h1.+foo-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[h2.\]%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d h1 -r >>/EOO
+ !foo configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ EOO
+ }
+
+ : orphan-repointed-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7b && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* foo --yes 2>!;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+ $rep_add -d h2 $rep/t7a && $rep_fetch -d h2;
+
+ $* libbaz +{ --config-name h2 } --mask-repository $rep/t7b 2>>EOE != 0;
+ error: package foo/1.1.0 is orphaned
+ info: explicitly upgrade it to a new version
+ info: while satisfying foo/1.1.0
+ EOE
+
+ # While at it, test foo deorphaning.
+ #
+ $* foo +{ --deorphan } libbaz +{ --config-name h2 } --yes --plan "" \
+ --mask-repository $rep/t7b 2>>~%EOE%;
+ drop libbaz/1.1.0 (unused)
+ % new libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\] \(required by foo\)%
+ % new libbaz/1.0.0 \[h2.\]%
+ replace/downgrade foo/1.0.0
+ disfigured foo/1.1.0
+ disfigured libbaz/1.1.0
+ purged libbaz/1.1.0
+ %fetched libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[h1.\.bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[h2.\]%
+ configured foo/1.0.0
+ %info: h2.+libbaz-1.0.0.+ is up to date%
+ %info: h1.+foo-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[h2.\]%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d h1 -r >>/EOO
+ !foo configured 1.0.0 available 1.1.0
+ !libbaz [h2/] configured 1.0.0 available 1.1.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ EOO
+ }
+
+ : unhold-repointed
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* foo --yes 2>!;
+
+ $pkg_status -d h1 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $rep_add -d h2 $rep/t7a && $rep_fetch -d h2;
+
+ $* libbar ?foo libbaz +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ drop libbaz/1.0.0 (unused)
+ % new libbaz/1.0.0 \[h2.\]%
+ reconfigure/update/unhold foo/1.0.0
+ new libbar/1.0.0
+ continue? [Y/n] disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ purged libbaz/1.0.0
+ %fetched libbaz/1.0.0 \[h2.\]%
+ %unpacked libbaz/1.0.0 \[h2.\]%
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ %configured libbaz/1.0.0 \[h2.\]%
+ configured foo/1.0.0
+ configured libbar/1.0.0
+ %info: h2.+libbaz-1.0.0.+ is up to date%
+ %info: h1.+libbar-1.0.0.+ is up to date%
+ %updated libbaz/1.0.0 \[h2.\]%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d h1 --link -r >>/EOO
+ !libbar configured 1.0.0
+ foo configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ !libbaz [h2/] configured 1.0.0
+ EOO
+ }
+
+ : satisfy
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h2 $rep/t7b && $rep_fetch -d h2;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h2);
+
+ $* foo --yes 2>!;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, h2, h1);
+
+ $* foo ?libbaz <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(required by foo\)%
+ new libbaz/1.0.0 (required by foo)
+ new foo/1.0.0
+ %continue\? \[Y.n\] fetched libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched foo/1.0.0
+ unpacked foo/1.0.0
+ %configured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ configured libbaz/1.0.0
+ configured foo/1.0.0
+ %info: h1.+foo-1.0.0.+ is up to date%
+ updated foo/1.0.0
+ EOE
+
+ $pkg_status -d h1 --link -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ !foo [h2/] configured 1.1.0
+ libbaz [h2/] configured 1.1.0
+ EOO
+
+ $* ?libbaz/1.0.0 +{ --config-name h2 } 2>>~%EOE% != 0;
+ %error: unable to downgrade package libbaz/1.1.0 \[h2.\] to 1.0.0%
+ % info: because configured package foo/1.1.0 \[h2.\] depends on \(libbaz \^1.1.0\)%
+ info: re-run with -v for additional dependency information
+ info: consider re-trying with --upgrade|-u potentially combined with --recursive|-r
+ info: or explicitly request up/downgrade of package foo
+ info: or explicitly specify package libbaz version to manually satisfy these constraints
+ EOE
+
+ $* ?libbaz +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ n
+ EOI
+ drop libbaz/1.0.0 (unused)
+ % update libbaz/1.1.0 \[h2.\]%
+ % reconfigure foo/1.0.0 \(dependent of libbaz \[h2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ purged libbaz/1.0.0
+ configured foo/1.0.0
+ EOE
+
+ $pkg_status -d h1 --link -r >>/EOO
+ !foo configured 1.0.0
+ libbaz [h2/] configured 1.1.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ !foo [h2/] configured 1.1.0
+ libbaz [h2/] configured 1.1.0
+ EOO
+ }
+
+ : upgrade-repointed
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbar --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo [t1/.bpkg/host/] configured 1.0.0
+ libbaz [t1/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+
+ $cfg_link -d t1 h1 2>!;
+
+ $rep_add -d t1 $rep/t7b && $rep_fetch -d t1;
+
+ $* libbar ?foo +{ --config-name h1 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[t1..bpkg.build2.\] \(unused\)%
+ % drop libbaz/1.0.0 \[t1..bpkg.host.\] \(unused\)%
+ % drop foo/1.0.0 \[t1..bpkg.host.\] \(unused\)%
+ % new libbaz/1.1.0 \[h1.\] \(required by foo \[h1.\]\)%
+ % new foo/1.1.0 \[h1.\] \(required by libbar\)%
+ upgrade libbar/1.1.0
+ continue? [Y/n] disfigured libbar/1.0.0
+ %disfigured foo/1.0.0 \[t1..bpkg.host.\]%
+ %disfigured libbaz/1.0.0 \[t1..bpkg.host.\]%
+ %disfigured libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[t1..bpkg.build2.\]%
+ %purged libbaz/1.0.0 \[t1..bpkg.host.\]%
+ %purged foo/1.0.0 \[t1..bpkg.host.\]%
+ %fetched libbaz/1.1.0 \[h1.\]%
+ %unpacked libbaz/1.1.0 \[h1.\]%
+ %fetched foo/1.1.0 \[h1.\]%
+ %unpacked foo/1.1.0 \[h1.\]%
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[h1.\]%
+ %configured foo/1.1.0 \[h1.\]%
+ configured libbar/1.1.0
+ %info: t1.+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -d t1 -r >>/EOO
+ !libbar configured 1.1.0
+ foo [h1/] configured 1.1.0
+ libbaz [h1/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ EOO
+ }
+
+ : upgrade-repointed-dependency
+ :
+ {
+ $cfg_create -d h1 --type host --name h1 &h1/***;
+ $cfg_create -d h2 --type host --name h2 &h2/***;
+
+ $cfg_link -d h1 h2;
+
+ $rep_add -d h1 $rep/t7a && $rep_fetch -d h1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, h1);
+
+ $* libbar --yes 2>!;
+
+ $pkg_status -d h1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $rep_add -d h1 $rep/t7b && $rep_fetch -d h1;
+
+ $* libbar ?foo ?libbaz +{ --config-name h2 } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(unused\)%
+ drop libbaz/1.0.0 (unused)
+ % new libbaz/1.1.0 \[h2.\] \(required by foo, libbar\)%
+ upgrade foo/1.1.0
+ upgrade libbar/1.1.0
+ continue? [Y/n] disfigured libbar/1.0.0
+ disfigured foo/1.0.0
+ disfigured libbaz/1.0.0
+ %disfigured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ purged libbaz/1.0.0
+ %fetched libbaz/1.1.0 \[h2.\]%
+ %unpacked libbaz/1.1.0 \[h2.\]%
+ fetched foo/1.1.0
+ unpacked foo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ %configured libbaz/1.1.0 \[h2.\]%
+ configured foo/1.1.0
+ configured libbar/1.1.0
+ %info: h1.+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -d h1 -r >>/EOO
+ !libbar configured 1.1.0
+ foo configured 1.1.0
+ libbaz [h2/] configured 1.1.0
+ libbaz [h2/] configured 1.1.0
+ EOO
+ }
+
+ : upgrade-prerequisite-replacement
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbar --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo [t1/.bpkg/host/] configured 1.0.0
+ libbaz [t1/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $rep_add -d t2 $rep/t7a && $rep_fetch -d t2;
+
+ test.arguments = $regex.apply($test.arguments, t1, t2);
+
+ $* libbaz --yes 2>!;
+
+ $cfg_link -d t1 t2 2>!;
+
+ $rep_add -d t2 $rep/t7b && $rep_fetch -d t2;
+
+ test.arguments = $regex.apply($test.arguments, t2, t1);
+
+ $* libbaz +{ --config-name t2 } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ drop libbaz/1.0.0 (unused)
+ % upgrade libbaz/1.1.0 \[t2.\]%
+ % reconfigure libbar/1.0.0 \(dependent of libbaz \[t2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbar/1.0.0
+ %disfigured libbaz/1.0.0 \[t2.\]%
+ disfigured libbaz/1.0.0
+ purged libbaz/1.0.0
+ %fetched libbaz/1.1.0 \[t2.\]%
+ %unpacked libbaz/1.1.0 \[t2.\]%
+ %configured libbaz/1.1.0 \[t2.\]%
+ configured libbar/1.0.0
+ %info: t2.+libbaz-1.1.0.+ is up to date%
+ %info: t1.+libbar-1.0.0.+ is up to date%
+ %updated libbaz/1.1.0 \[t2.\]%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libbar configured 1.0.0
+ foo [t1/.bpkg/host/] configured 1.0.0
+ libbaz [t1/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ !libbaz [t2/] configured 1.1.0
+ !libbaz [t2/] configured 1.1.0
+ EOO
+ }
+
+ : copy-upgrade-dependency-tree
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libbar --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libbar configured 1.0.0
+ foo [t1/.bpkg/host/] configured 1.0.0
+ libbaz [t1/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $cfg_create -d t2 --name t2 &t2/***;
+
+ $cfg_link -d t1 t2 2>!;
+
+ $rep_add -d t2 $rep/t7b && $rep_fetch -d t2;
+
+ $* libbar +{ --config-name t2 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbaz/1.1.0 \[t2..bpkg.host.\] \(required by foo \[t2..bpkg.host.\]\)%
+ % new foo/1.1.0 \[t2..bpkg.host.\] \(required by libbar \[t2.\]\)%
+ % new libbaz/1.1.0 \[t2.\] \(required by libbar \[t2.\]\)%
+ % new libbar/1.1.0 \[t2.\]%
+ %continue\? \[Y/n\] fetched libbaz/1.1.0 \[t2..bpkg.host.\]%
+ %unpacked libbaz/1.1.0 \[t2..bpkg.host.\]%
+ %fetched foo/1.1.0 \[t2..bpkg.host.\]%
+ %unpacked foo/1.1.0 \[t2..bpkg.host.\]%
+ %fetched libbaz/1.1.0 \[t2.\]%
+ %unpacked libbaz/1.1.0 \[t2.\]%
+ %fetched libbar/1.1.0 \[t2.\]%
+ %unpacked libbar/1.1.0 \[t2.\]%
+ %configured libbaz/1.1.0 \[t2..bpkg.host.\]%
+ %configured foo/1.1.0 \[t2..bpkg.host.\]%
+ %configured libbaz/1.1.0 \[t2.\]%
+ %configured libbar/1.1.0 \[t2.\]%
+ %info: t2.+libbar-1.1.0.+ is up to date%
+ %updated libbar/1.1.0 \[t2.\]%
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libbar configured 1.0.0
+ foo [t1/.bpkg/host/] configured 1.0.0
+ libbaz [t1/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [t1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ !libbar [t2/] configured 1.1.0
+ foo [t2/.bpkg/host/] configured 1.1.0
+ libbaz [t2/.bpkg/host/] configured 1.1.0
+ libbaz [t2/] configured 1.1.0
+ EOO
+ }
+
+ : repointed-dependent-indirect-dependency-upgrade
+ :
+ {
+ $cfg_create -d t1 --name t1 &t1/***;
+ $cfg_create -d t2 --name t2 &t2/***;
+ $cfg_create -d h1 --name h1 --type host &h1/***;
+
+ $cfg_link -d t1 t2 2>!;
+ $cfg_link -d t1 h1 2>!;
+ $cfg_link -d t2 h1 2>!;
+
+ $rep_add -d t1 $rep/t7a && $rep_fetch -d t1;
+
+ test.arguments = $regex.apply($test.arguments, cfg, t1);
+
+ $* libfix --yes 2>!;
+
+ $pkg_status -d t1 -r >>/EOO;
+ !libfix configured 1.0.0
+ libbar configured 1.0.0
+ foo [h1/] configured 1.0.0
+ libbaz [h1/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ libbox configured 1.0.0
+ foo [h1/] configured 1.0.0
+ libbaz [h1/] configured 1.0.0
+ libbuild2-bar [h1/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ libfax configured 1.0.0
+ EOO
+
+ $rep_add -d t1 $rep/t7b && $rep_fetch -d t1;
+ $rep_add -d t2 $rep/t7b && $rep_fetch -d t2;
+ $rep_add -d h1 $rep/t7b && $rep_fetch -d h1;
+
+ $* libfix libfax +{ --config-name t2 } foo +{ --config-name h1 } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libfax/1.0.0 \(unused\)%
+ % new libfax/1.0.0 \[t2.\]%
+ % drop libbuild2-bar/1.0.0 \[h1..bpkg.build2.\] \(unused\)%
+ % upgrade libbaz/1.1.0 \[h1.\] \(required by foo \[h1.\]\)%
+ % upgrade foo/1.1.0 \[h1.\]%
+ % reconfigure libbar \(dependent of foo \[h1.\]\)%
+ % reconfigure libbox \(dependent of foo \[h1.\]\)%
+ reconfigure/update libfix/1.0.0
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libfix/1.0.0
+ disfigured libbox/1.0.0
+ disfigured libbar/1.0.0
+ %disfigured foo/1.0.0 \[h1.\]%
+ %disfigured libbaz/1.0.0 \[h1.\]%
+ %disfigured libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ disfigured libfax/1.0.0
+ purged libfax/1.0.0
+ %fetched libfax/1.0.0 \[t2.\]%
+ %unpacked libfax/1.0.0 \[t2.\]%
+ %purged libbuild2-bar/1.0.0 \[h1..bpkg.build2.\]%
+ %fetched libbaz/1.1.0 \[h1.\]%
+ %unpacked libbaz/1.1.0 \[h1.\]%
+ %fetched foo/1.1.0 \[h1.\]%
+ %unpacked foo/1.1.0 \[h1.\]%
+ %configured libfax/1.0.0 \[t2.\]%
+ %configured libbaz/1.1.0 \[h1.\]%
+ %configured foo/1.1.0 \[h1.\]%
+ configured libbar/1.0.0
+ configured libbox/1.0.0
+ configured libfix/1.0.0
+ %info: t2.+libfax-1.0.0.+ is up to date%
+ %info: h1.+foo-1.1.0.+ is up to date%
+ %info: t1.+libfix-1.0.0.+ is up to date%
+ %info: t1.+libbar-1.0.0.+ is up to date%
+ %info: t1.+libbox-1.0.0.+ is up to date%
+ %updated libfax/1.0.0 \[t2.\]%
+ %updated foo/1.1.0 \[h1.\]%
+ updated libfix/1.0.0
+ updated libbar/1.0.0
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -d t1 --link -r >>/EOO
+ !libfix configured 1.0.0
+ libbar configured 1.0.0 available 1.1.0
+ !foo [h1/] configured 1.1.0
+ libbaz [h1/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ libbox configured 1.0.0 available 1.1.0
+ !foo [h1/] configured 1.1.0
+ libbaz [h1/] configured 1.1.0
+ libbaz configured 1.0.0 available 1.1.0
+ !libfax [t2/] configured 1.0.0
+ !libfax [t2/] configured 1.0.0
+ !foo [h1/] configured 1.1.0
+ libbaz [h1/] configured 1.1.0
+ EOO
+ }
+ }
+
+ : build2-module-dep
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : private-config
+ :
+ {
+ $clone_cfg;
+
+ $* libbiz <<EOI 2>>~%EOE% &cfg/.bpkg/build2/*** &cfg/.bpkg/host/***;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new libbaz/1.0.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new foo/1.0.0 \[cfg..bpkg.host.\] \(required by libbiz\)%
+ % new libbaz/1.0.0 \[cfg..bpkg.build2.\] \(required by libbuild2-foo \[cfg..bpkg.build2.\]\)%
+ % new libbuild2-foo/1.0.0 \[cfg..bpkg.build2.\] \(required by libbiz\)%
+ new libbaz/1.0.0 (required by libbiz)
+ new libbiz/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched libbaz/1.0.0 \[cfg..bpkg.build2.\]%
+ %unpacked libbaz/1.0.0 \[cfg..bpkg.build2.\]%
+ %fetched libbuild2-foo/1.0.0 \[cfg..bpkg.build2.\]%
+ %unpacked libbuild2-foo/1.0.0 \[cfg..bpkg.build2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.0.0 \[cfg..bpkg.host.\]%
+ %configured libbaz/1.0.0 \[cfg..bpkg.build2.\]%
+ %configured libbuild2-foo/1.0.0 \[cfg..bpkg.build2.\]%
+ configured libbaz/1.0.0
+ configured libbiz/1.0.0
+ %info: cfg.+libbiz-1.0.0.+ is up to date%
+ updated libbiz/1.0.0
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !libbiz configured 1.0.0
+ foo [cfg/.bpkg/host/] configured 1.0.0
+ libbaz [cfg/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [cfg/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-foo [cfg/.bpkg/build2/] configured 1.0.0
+ libbaz [cfg/.bpkg/build2/] configured 1.0.0
+ EOO
+
+ $pkg_drop libbiz
+ }
+
+ : external-config
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type build2 --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $cfg_create -d cfg3 --type host --name cfg3 &cfg3/***;
+ $cfg_link -d cfg cfg3;
+
+ $* libbiz <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbuild2-bar/1.0.0 \[cfg3..bpkg.build2.\] \(required by foo \[cfg3.\]\)%
+ % new libbaz/1.0.0 \[cfg3.\] \(required by foo \[cfg3.\]\)%
+ % new foo/1.0.0 \[cfg3.\] \(required by libbiz\)%
+ % new libbaz/1.0.0 \[cfg2.\] \(required by libbuild2-foo \[cfg2.\]\)%
+ % new libbuild2-foo/1.0.0 \[cfg2.\] \(required by libbiz\)%
+ new libbaz/1.0.0 (required by libbiz)
+ new libbiz/1.0.0
+ %continue\? \[Y/n\] fetched libbuild2-bar/1.0.0 \[cfg3..bpkg.build2.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg3..bpkg.build2.\]%
+ %fetched libbaz/1.0.0 \[cfg3.\]%
+ %unpacked libbaz/1.0.0 \[cfg3.\]%
+ %fetched foo/1.0.0 \[cfg3.\]%
+ %unpacked foo/1.0.0 \[cfg3.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched libbuild2-foo/1.0.0 \[cfg2.\]%
+ %unpacked libbuild2-foo/1.0.0 \[cfg2.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbiz/1.0.0
+ unpacked libbiz/1.0.0
+ %configured libbuild2-bar/1.0.0 \[cfg3..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg3.\]%
+ %configured foo/1.0.0 \[cfg3.\]%
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured libbuild2-foo/1.0.0 \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbiz/1.0.0
+ %info: cfg.+libbiz-1.0.0.+ is up to date%
+ updated libbiz/1.0.0
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !libbiz configured 1.0.0
+ foo [cfg3/] configured 1.0.0
+ libbaz [cfg3/] configured 1.0.0
+ libbuild2-bar [cfg3/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ EOO
+
+ $pkg_drop libbiz
+ }
+
+ : build2-config
+ :
+ {
+ $cfg_create -d cfg --type build2 &cfg/***;
+ $rep_add $rep/t7a && $rep_fetch;
+
+ $* libbiz 2>>EOE != 0
+ error: build-time dependency foo in build system module configuration
+ info: build system modules cannot have build-time dependencies
+ info: while satisfying libbiz/1.0.0
+ EOE
+ }
+
+ : duplicates
+ :
+ {
+ $cfg_create -d cfg &cfg/***;
+
+ $cfg_create -d cfg2 --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $cfg_create -d cfg3 --name cfg3 &cfg3/***;
+ $cfg_link -d cfg cfg3;
+
+ $* "libbiz@$rep/t7a" +{ --config-name cfg2 } --yes --trust-yes 2>!;
+ $* "libbuz@$rep/t7a" +{ --config-name cfg3 } --yes --trust-yes 2>!;
+
+ $* "libbix@$rep/t7a" --trust-yes 2>>/~%EOE% != 0
+ %(added|fetching).+%{2}
+ error: building build system module libbuild2-bar in multiple configurations
+ % info: cfg(2|3)/.bpkg/build2/%{2}
+ EOE
+ }
+ }
+
+ : multiple-configs
+ :
+ {
+ cfg_uuid = '18f48b4b-b5d9-4712-b98c-1930df1c4228'
+ cfg2_uuid = '28f48b4b-b5d9-4712-b98c-1930df1c4228'
+ cfg3_uuid = '38f48b4b-b5d9-4712-b98c-1930df1c4228'
+ cfg4_uuid = '48f48b4b-b5d9-4712-b98c-1930df1c4228'
+ cfg5_uuid = '58f48b4b-b5d9-4712-b98c-1930df1c4228'
+
+ : 2-current-configs
+ :
+ {
+ $cfg_create -d cfg --uuid $cfg_uuid &cfg/***;
+ $cfg_create -d cfg2 --uuid $cfg2_uuid &cfg2/***;
+ $cfg_create -d cfg3 --uuid $cfg3_uuid --type build2 &cfg3/***;
+
+ $cfg_link -d cfg cfg3;
+ $cfg_link -d cfg2 cfg3;
+
+ $rep_add $rep/t7a && $rep_fetch;
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ test.arguments += -d cfg2; # Now refers 2 current dirs: cfg/ and cfg2/.
+
+ # While at it, make sure --config-uuid is only allowed for multiple
+ # current configurations.
+ #
+ $* foo +{ --config-id 1 } 2>>EOE != 0;
+ error: --config-id specified for multiple current configurations
+ info: use --config-uuid to specify configurations in this mode
+ info: while validating options for foo
+ EOE
+
+ $* foo +{ --config-name cfg2 } 2>>EOE != 0;
+ error: --config-name specified for multiple current configurations
+ info: use --config-uuid to specify configurations in this mode
+ info: while validating options for foo
+ EOE
+
+ # While at it, make sure a package must have the configuration
+ # specified.
+ #
+ $* foo 2>>EOE != 0;
+ error: no configuration specified for foo
+ info: configuration must be explicitly specified for each package in multi-configurations mode
+ info: use --config-uuid to specify its configuration
+ EOE
+
+ # Build foo in cfg/ and cfg2/ with its libbaz dependency colocated and
+ # libbuild2-bar dependency shared in cfg3/.
+ #
+ $* foo +{ --config-uuid $cfg_uuid --config-uuid $cfg2_uuid } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbaz/1.0.0 \[cfg.\] \(required by foo \[cfg.\]\)%
+ % new libbuild2-bar/1.0.0 \[cfg3.\] \(required by foo \[cfg.\], foo \[cfg2.\]\)%
+ % new foo/1.0.0 \[cfg.\]%
+ % new libbaz/1.0.0 \[cfg2.\] \(required by foo \[cfg2.\]\)%
+ % new foo/1.0.0 \[cfg2.\]%
+ %continue\? \[Y/n\] fetched libbaz/1.0.0 \[cfg.\]%
+ %unpacked libbaz/1.0.0 \[cfg.\]%
+ %fetched libbuild2-bar/1.0.0 \[cfg3.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg3.\]%
+ %fetched foo/1.0.0 \[cfg.\]%
+ %unpacked foo/1.0.0 \[cfg.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched foo/1.0.0 \[cfg2.\]%
+ %unpacked foo/1.0.0 \[cfg2.\]%
+ %configured libbaz/1.0.0 \[cfg.\]%
+ %configured libbuild2-bar/1.0.0 \[cfg3.\]%
+ %configured foo/1.0.0 \[cfg.\]%
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ %info: cfg.+foo-1.0.0.+ is up to date%
+ %info: cfg2.+foo-1.0.0.+ is up to date%
+ %updated foo/1.0.0 \[cfg.\]%
+ %updated foo/1.0.0 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [cfg3/] configured 1.0.0
+ EOO
+
+ $pkg_status -d cfg2 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz configured 1.0.0
+ libbuild2-bar [cfg3/] configured 1.0.0
+ EOO
+
+ # Move libbuild2-bar to cfg4/ and libbaz to cfg5/.
+ #
+ $cfg_create -d cfg4 --uuid $cfg4_uuid --type build2 &cfg4/***;
+ $cfg_create -d cfg5 --uuid $cfg5_uuid &cfg5/***;
+
+ $cfg_link -d cfg cfg4;
+ $cfg_link -d cfg2 cfg4;
+ $cfg_link -d cfg cfg5;
+ $cfg_link -d cfg2 cfg5;
+
+ $* ?libbuild2-bar +{ --config-uuid $cfg4_uuid } \
+ ?libbaz +{ --config-uuid $cfg5_uuid } <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbaz/1.0.0 \[cfg2.\] \(unused\)%
+ % drop libbuild2-bar/1.0.0 \[cfg3.\] \(unused\)%
+ % drop libbaz/1.0.0 \[cfg.\] \(unused\)%
+ % new libbuild2-bar/1.0.0 \[cfg4.\] \(required by foo \[cfg.\], foo \[cfg2.\]\)%
+ % new libbaz/1.0.0 \[cfg5.\] \(required by foo \[cfg.\], foo \[cfg2.\]\)%
+ % reconfigure foo/1.0.0 \[cfg2.\] \(dependent of libbaz \[cfg5.\], libbuild2-bar \[cfg4.\]\)%
+ % reconfigure foo/1.0.0 \[cfg.\] \(dependent of libbaz \[cfg5.\], libbuild2-bar \[cfg4.\]\)%
+ %continue\? \[Y/n\] update dependent packages\? \[Y/n\] disfigured foo/1.0.0 \[cfg.\]%
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg3.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %purged libbaz/1.0.0 \[cfg2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg3.\]%
+ %purged libbaz/1.0.0 \[cfg.\]%
+ %fetched libbuild2-bar/1.0.0 \[cfg4.\]%
+ %unpacked libbuild2-bar/1.0.0 \[cfg4.\]%
+ %fetched libbaz/1.0.0 \[cfg5.\]%
+ %unpacked libbaz/1.0.0 \[cfg5.\]%
+ %configured libbuild2-bar/1.0.0 \[cfg4.\]%
+ %configured libbaz/1.0.0 \[cfg5.\]%
+ %configured foo/1.0.0 \[cfg2.\]%
+ %configured foo/1.0.0 \[cfg.\]%
+ %info: cfg2.+foo-1.0.0.+ is up to date%
+ %info: cfg.+foo-1.0.0.+ is up to date%
+ %updated foo/1.0.0 \[cfg2.\]%
+ %updated foo/1.0.0 \[cfg.\]%
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz [cfg5/] configured 1.0.0
+ libbuild2-bar [cfg4/] configured 1.0.0
+ EOO
+
+ $pkg_status -d cfg2 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz [cfg5/] configured 1.0.0
+ libbuild2-bar [cfg4/] configured 1.0.0
+ EOO
+
+ $rep_add $rep/t7b && $rep_fetch;
+
+ $* foo ?libbaz --config-uuid $cfg_uuid <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbaz/1.1.0 \[cfg.\] \(required by foo \[cfg.\]\)%
+ % upgrade foo/1.1.0 \[cfg.\]%
+ %continue\? \[Y/n\] disfigured foo/1.0.0 \[cfg.\]%
+ %fetched libbaz/1.1.0 \[cfg.\]%
+ %unpacked libbaz/1.1.0 \[cfg.\]%
+ %fetched foo/1.1.0 \[cfg.\]%
+ %unpacked foo/1.1.0 \[cfg.\]%
+ %configured libbaz/1.1.0 \[cfg.\]%
+ %configured foo/1.1.0 \[cfg.\]%
+ %info: cfg.+foo-1.1.0.+ is up to date%
+ %updated foo/1.1.0 \[cfg.\]%
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !foo configured 1.1.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg2 -r >>/EOO;
+ !foo configured 1.0.0
+ libbaz [cfg5/] configured 1.0.0
+ libbuild2-bar [cfg4/] configured 1.0.0
+ EOO
+
+ $rep_add -d cfg2 $rep/t7b && $rep_fetch -d cfg2;
+
+ $* --no-move foo +{ --config-uuid $cfg2_uuid } \
+ ?libbaz +{ --config-uuid $cfg5_uuid } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[cfg4.\] \(unused\)%
+ % upgrade libbaz/1.1.0 \[cfg5.\]%
+ % upgrade foo/1.1.0 \[cfg2.\]%
+ %continue\? \[Y/n\] disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg5.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg4.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg4.\]%
+ %fetched libbaz/1.1.0 \[cfg5.\]%
+ %unpacked libbaz/1.1.0 \[cfg5.\]%
+ %fetched foo/1.1.0 \[cfg2.\]%
+ %unpacked foo/1.1.0 \[cfg2.\]%
+ %configured libbaz/1.1.0 \[cfg5.\]%
+ %configured foo/1.1.0 \[cfg2.\]%
+ %info: cfg2.+foo-1.1.0.+ is up to date%
+ %updated foo/1.1.0 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg2 -r >>/EOO
+ !foo configured 1.1.0
+ libbaz [cfg5/] configured 1.1.0
+ EOO
+ }
+
+ : variable
+ :
+ {
+ $cfg_create -d cfg --uuid $cfg_uuid &cfg/***;
+ $cfg_create -d cfg2 --uuid $cfg2_uuid &cfg2/***;
+ $cfg_create -d cfg3 --uuid $cfg3_uuid --type host &cfg3/***;
+
+ $cfg_link -d cfg cfg3;
+ $cfg_link -d cfg2 cfg3;
+
+ $rep_add $rep/t7a && $rep_fetch;
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ test.arguments += -d cfg2; # Now refers 2 current dirs: cfg/ and cfg2/.
+
+ $* --configure-only \
+ { --config-uuid $cfg_uuid config.libbaz=true }+ libbaz \
+ { --config-uuid $cfg2_uuid }+ libbaz 2>!;
+
+ sed -n -e 's/^config.libbaz = (.+)$/\1/p' \
+ cfg/libbaz-1.0.0/build/config.build >'true';
+
+ sed -n -e 's/^config.libbaz = (.+)$/\1/p' \
+ cfg2/libbaz-1.0.0/build/config.build >'false'
+ }
+
+ : system-dependency
+ :
+ {
+ $cfg_create -d cfg --uuid $cfg_uuid &cfg/***;
+ $cfg_create -d cfg2 --uuid $cfg2_uuid &cfg2/***;
+ $cfg_create -d cfg3 --uuid $cfg3_uuid --type host &cfg3/***;
+
+ $cfg_link -d cfg cfg3;
+ $cfg_link -d cfg2 cfg3;
+
+ $rep_add $rep/t7a && $rep_fetch;
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ test.arguments += -d cfg2; # Now refers 2 current dirs: cfg/ and cfg2/.
+
+ $* libbox +{ --config-uuid $cfg_uuid --config-uuid $cfg2_uuid } \
+ '?sys:foo' <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % new libbaz/1.0.0 \[cfg.\] \(required by libbox \[cfg.\]\)%
+ % configure sys:foo/\* \[cfg3.\] \(required by libbox \[cfg.\], libbox \[cfg2.\]\)%
+ % new libbox/1.0.0 \[cfg.\]%
+ % new libbaz/1.0.0 \[cfg2.\] \(required by libbox \[cfg2.\]\)%
+ % new libbox/1.0.0 \[cfg2.\]%
+ %continue\? \[Y/n\] fetched libbaz/1.0.0 \[cfg.\]%
+ %unpacked libbaz/1.0.0 \[cfg.\]%
+ %fetched libbox/1.0.0 \[cfg.\]%
+ %unpacked libbox/1.0.0 \[cfg.\]%
+ %fetched libbaz/1.0.0 \[cfg2.\]%
+ %unpacked libbaz/1.0.0 \[cfg2.\]%
+ %fetched libbox/1.0.0 \[cfg2.\]%
+ %unpacked libbox/1.0.0 \[cfg2.\]%
+ %configured libbaz/1.0.0 \[cfg.\]%
+ %configured sys:foo/\* \[cfg3.\]%
+ %configured libbox/1.0.0 \[cfg.\]%
+ %configured libbaz/1.0.0 \[cfg2.\]%
+ %configured libbox/1.0.0 \[cfg2.\]%
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ %info: cfg2.+libbox-1.0.0.+ is up to date%
+ %updated libbox/1.0.0 \[cfg.\]%
+ %updated libbox/1.0.0 \[cfg2.\]%
+ EOE
+
+ $pkg_status -d cfg -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg3/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_status -d cfg2 -r >>/EOO
+ !libbox configured 1.0.0
+ foo [cfg3/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+ }
+
+ : build2-module-different-clusters
+ :
+ : Test that the same module can successfully be built in 2 configurations
+ : if they belong to different linked configuration clusters.
+ :
+ {
+ $cfg_create -d cfg --uuid $cfg_uuid &cfg/***;
+ $cfg_create -d cfg2 --uuid $cfg2_uuid &cfg2/***;
+
+ test.arguments += -d cfg2; # Now refers 2 current dirs: cfg/ and cfg2/.
+
+ $* "libbiz@$rep/t7a" +{ --config-uuid $cfg_uuid } \
+ "libbuz@$rep/t7a" +{ --config-uuid $cfg2_uuid } --yes --trust-yes 2>>~%EOE%
+ %(added|fetching) .+%{4}
+ %(fetched|unpacked) .+%{26}
+ %configured .+%{13}
+ %info: .+ is up to date%{2}
+ %updated .+%{2}
+ EOE
+ }
+ }
+
+ : system
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t7a && $rep_fetch
+
+ : no-config
+ :
+ {
+ +$clone_cfg
+
+ : linked
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* libbox '?sys:foo' <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % configure sys:foo/\* \[cfg2.\] \(required by libbox\)%
+ new libbaz/1.0.0 (required by libbox)
+ new libbox/1.0.0
+ continue? [Y/n] fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ %configured sys:foo/\* \[cfg2.\]%
+ configured libbaz/1.0.0
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : private-host
+ :
+ {
+ $clone_cfg;
+
+ $* libbox '?sys:foo' &cfg/.bpkg/host/*** <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % configure sys:foo/\* \[cfg..bpkg.host.\] \(required by libbox\)%
+ new libbaz/1.0.0 (required by libbox)
+ new libbox/1.0.0
+ continue? [Y/n] fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ %configured sys:foo/\* \[cfg..bpkg.host.\]%
+ configured libbaz/1.0.0
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg/.bpkg/host/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : private-module
+ :
+ {
+ $clone_cfg;
+
+ $* libbox '?sys:libbuild2-bar' &cfg/.bpkg/host/*** &cfg/.bpkg/build2/*** <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % configure sys:libbuild2-bar/\* \[cfg..bpkg.build2.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new libbaz/1.0.0 \[cfg..bpkg.host.\] \(required by foo \[cfg..bpkg.host.\]\)%
+ % new foo/1.0.0 \[cfg..bpkg.host.\] \(required by libbox\)%
+ new libbaz/1.0.0 (required by libbox)
+ new libbox/1.0.0
+ %continue\? \[Y/n\] fetched libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %fetched foo/1.0.0 \[cfg..bpkg.host.\]%
+ %unpacked foo/1.0.0 \[cfg..bpkg.host.\]%
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ %configured sys:libbuild2-bar/\* \[cfg..bpkg.build2.\]%
+ %configured libbaz/1.0.0 \[cfg..bpkg.host.\]%
+ %configured foo/1.0.0 \[cfg..bpkg.host.\]%
+ configured libbaz/1.0.0
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg/.bpkg/host/] configured 1.0.0
+ libbaz [cfg/.bpkg/host/] configured 1.0.0
+ libbuild2-bar [cfg/.bpkg/build2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+ }
+
+ : config
+ :
+ {
+ +$clone_cfg
+
+ : linked
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $cfg_create -d cfg3 --type host --name cfg3 &cfg3/***;
+ $cfg_link -d cfg cfg3;
+
+ $* libbox '?sys:foo' +{ --config-name cfg3 } <<EOI 2>>~%EOE%;
+ y
+ EOI
+ % configure sys:foo/\* \[cfg3.\] \(required by libbox\)%
+ new libbaz/1.0.0 (required by libbox)
+ new libbox/1.0.0
+ continue? [Y/n] fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ fetched libbox/1.0.0
+ unpacked libbox/1.0.0
+ %configured sys:foo/\* \[cfg3.\]%
+ configured libbaz/1.0.0
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg3/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : src-sys
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* libbox --yes 2>!;
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $* '?sys:foo' <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(unused\)%
+ % drop libbaz/1.0.0 \[cfg2.\] \(unused\)%
+ % reconfigure sys:foo/\* \[cfg2.\]%
+ % reconfigure libbox \(dependent of foo \[cfg2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbox/1.0.0
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbaz/1.0.0 \[cfg2.\]%
+ %purged foo/1.0.0 \[cfg2.\]%
+ %configured sys:foo/\* \[cfg2.\]%
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : src-sys-upgrade
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $* libbox --yes 2>!;
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $* '?sys:foo/1.1.0' <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(unused\)%
+ % drop libbaz/1.0.0 \[cfg2.\] \(unused\)%
+ % reconfigure sys:foo/1.1.0 \[cfg2.\]%
+ % reconfigure libbox \(dependent of foo \[cfg2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbox/1.0.0
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbaz/1.0.0 \[cfg2.\]%
+ %purged foo/1.0.0 \[cfg2.\]%
+ %configured sys:foo/1.1.0 \[cfg2.\]%
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured,system !1.1.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : src-sys-unhold
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $* libbox foo +{ --config-name cfg2 } --yes 2>!;
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ !foo [cfg2/] configured 1.0.0
+ libbaz [cfg2/] configured 1.0.0
+ libbuild2-bar [cfg2/.bpkg/build2/] configured 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $* '?sys:foo' <<EOI 2>>~%EOE%;
+ y
+ y
+ EOI
+ % drop libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\] \(unused\)%
+ % drop libbaz/1.0.0 \[cfg2.\] \(unused\)%
+ % reconfigure/unhold sys:foo/\* \[cfg2.\]%
+ % reconfigure libbox \(dependent of foo \[cfg2.\]\)%
+ continue? [Y/n] update dependent packages? [Y/n] disfigured libbox/1.0.0
+ %disfigured foo/1.0.0 \[cfg2.\]%
+ %disfigured libbaz/1.0.0 \[cfg2.\]%
+ %disfigured libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbuild2-bar/1.0.0 \[cfg2..bpkg.build2.\]%
+ %purged libbaz/1.0.0 \[cfg2.\]%
+ %purged foo/1.0.0 \[cfg2.\]%
+ %configured sys:foo/\* \[cfg2.\]%
+ configured libbox/1.0.0
+ %info: cfg.+libbox-1.0.0.+ is up to date%
+ updated libbox/1.0.0
+ EOE
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+
+ : sys-unhold
+ :
+ {
+ $clone_cfg;
+
+ $cfg_create -d cfg2 --type host --name cfg2 &cfg2/***;
+ $cfg_link -d cfg cfg2;
+
+ $rep_add -d cfg2 $rep/t7a && $rep_fetch -d cfg2;
+
+ $* libbox 'sys:foo' +{ --config-name cfg2 } --yes 2>!;
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ !foo [cfg2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $* '?sys:foo';
+
+ $pkg_status -r >>/EOO;
+ !libbox configured 1.0.0
+ foo [cfg2/] configured,system !* available 1.0.0
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbox
+ }
+ }
+ }
+}
+
+: deorphan
+:
+{
+ test.arguments += --yes --plan ""
+
+ : dependency
+ :
+ {
+ : unhold
+ :
+ {
+ : basics
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+ $rep_fetch $rep/t4b;
+
+ $* libbar 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0
+ EOO
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ # Deorphan libfoo/1.1.0 to libfoo/1.1.0#1.
+ #
+ # Note that libfoo/1.1.0 is considered as an orphan since its version
+ # is replaced with 1.1.0#1 in its existing repository fragment. This is
+ # in contrast to the subsequent tests where the package repository is
+ # removed.
+ #
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0#1
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0
+ using libfoo/1.1.0#1 (external)
+ configured libfoo/1.1.0#1
+ configured libbar/1.1.0
+ %info: .+libfoo.+ is up to date%
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libfoo/1.1.0#1
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ # Deorphan libfoo/1.1.0#1 to ?libfoo/1.1.0.
+ #
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade/unhold libfoo/1.1.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ # Deorphan libfoo/1.1.0#1 to ?libfoo/1.1.0.
+ #
+ $rep_add --type dir libfoo/ && $rep_fetch;
+ $* libfoo 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade/unhold libfoo/1.1.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : basics-masked
+ :
+ : As above but using --mask-repository* instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+ $rep_fetch $rep/t4b;
+
+ $* libbar 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0
+ EOO
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ # Deorphan libfoo/1.1.0 to libfoo/1.1.0#1.
+ #
+ # Note that libfoo/1.1.0 is considered as an orphan since its version
+ # is replaced with 1.1.0#1 in its existing repository fragment. This is
+ # in contrast to the subsequent tests where the package repository is
+ # removed.
+ #
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0#1
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0
+ using libfoo/1.1.0#1 (external)
+ configured libfoo/1.1.0#1
+ configured libbar/1.1.0
+ %info: .+libfoo.+ is up to date%
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libfoo/1.1.0#1
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ # Deorphan libfoo/1.1.0#1 to ?libfoo/1.1.0.
+ #
+ # Note that on Windows the local repository canonical name path part
+ # is converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libfoo)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ $* --mask-repository $cn --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade/unhold libfoo/1.1.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ # Noop.
+ #
+ $* --mask-repository-uuid "$cfg_uuid=$cn" --deorphan ?libfoo;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ # Deorphan libfoo/1.1.0#1 to ?libfoo/1.1.0.
+ #
+ $* libfoo 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ $* --mask-repository $cn --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade/unhold libfoo/1.1.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ updated libbar/1.1.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ # Noop.
+ #
+ $* --mask-repository $cn --deorphan ?libfoo;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ libfoo configured 1.1.0 available 1.1.0#1
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : drop
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+ $rep_fetch $rep/t4b;
+
+ $* libbar 2>!;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+ $* libfoo 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan ?libfoo ?libbar 2>>EOE;
+ drop libfoo/1.1.0#1 (unused)
+ drop libbar/1.1.0 (unused)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ purged libfoo/1.1.0#1
+ purged libbar/1.1.0
+ EOE
+
+ $pkg_status -ar 2>'info: no packages in the configuration'
+ }
+
+ : drop-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+ $rep_fetch $rep/t4b;
+
+ $* libbar 2>!;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+ $* libfoo 2>!;
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured 1.1.0
+ !libfoo configured 1.1.0#1
+ EOO
+
+ # Note that on Windows the local repository canonical name path part
+ # is converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libfoo)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ $* --mask-repository $cn --deorphan ?libfoo ?libbar 2>>EOE;
+ drop libfoo/1.1.0#1 (unused)
+ drop libbar/1.1.0 (unused)
+ disfigured libbar/1.1.0
+ disfigured libfoo/1.1.0#1
+ purged libfoo/1.1.0#1
+ purged libbar/1.1.0
+ EOE
+
+ $pkg_status -ar 2>'info: no packages in the configuration'
+ }
+
+ : no-dependent
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+ $* libfoo 2>!;
+
+ $rep_fetch $rep/t4b;
+ $rep_remove $~/libfoo/;
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ $* --deorphan ?libfoo 2>>EOE;
+ drop libfoo/1.1.0 (unused)
+ disfigured libfoo/1.1.0
+ purged libfoo/1.1.0
+ EOE
+
+ $pkg_status -ar 2>'info: no packages in the configuration'
+ }
+
+ : no-dependent-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+ $* libfoo 2>!;
+
+ $rep_fetch $rep/t4b;
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ # Note that on Windows the local repository canonical name path part
+ # is converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libfoo)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ $* --mask-repository $cn --deorphan ?libfoo 2>>EOE;
+ drop libfoo/1.1.0 (unused)
+ disfigured libfoo/1.1.0
+ purged libfoo/1.1.0
+ EOE
+
+ $pkg_status -ar 2>'info: no packages in the configuration'
+ }
+
+ : preference
+ :
+ {
+ $clone_root_cfg;
+
+ $tar -xf $src/t14d/libfoo-1.1.0+2.tar.gz &libfoo-1.1.0+2/***;
+ mv libfoo-1.1.0+2 libfoo;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $tar -xf $src/t2/libbar-1.0.0.tar.gz &libbar-1.0.0/***;
+ mv libbar-1.0.0 libbar;
+
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14c
+ role: prerequisite
+ :
+ location: $rep/t14d
+ role: prerequisite
+ :
+ location: $rep/t14e
+ role: prerequisite
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ !libfoo configured 1.1.0+2 available [1.2.0] [1.1.1] [1.1.0+3] (1.1.0+2) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Deorphan/unhold libfoo/1.1.0+2 to the exactly same version.
+ #
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/update/unhold libfoo/1.1.0+2
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+2
+ unpacked libfoo/1.1.0+2
+ configured libfoo/1.1.0+2
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+2 available [1.2.0] [1.1.1] [1.1.0+3] (1.1.0+2) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+2 available [1.2.0] [1.1.1] [1.1.0+3] (1.1.0+2) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the later revision of same version
+ # (1.1.0+3).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14c
+ role: prerequisite
+ :
+ location: $rep/t14e
+ role: prerequisite
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0+3
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+3
+ unpacked libfoo/1.1.0+3
+ configured libfoo/1.1.0+3
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+3 available [1.2.0] [1.1.1] (1.1.0+3) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+3 available [1.2.0] [1.1.1] (1.1.0+3) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Deorphan libfoo/1.1.0+3 to the later patch of same version (1.1.1).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14c
+ role: prerequisite
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0+3
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.1 available [1.2.0] (1.1.1) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.1 available [1.2.0] (1.1.1) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Deorphan libfoo/1.1.1 to later minor of same version (1.2.0).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14c
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.2.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.1
+ fetched libfoo/1.2.0
+ unpacked libfoo/1.2.0
+ configured libfoo/1.2.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.2.0 available (1.2.0) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.2.0 available (1.2.0) [1.1.0+1] [1.1.0] [1.0.0]
+ EOO
+
+ # Deorphan libfoo/1.2.0 to latest available version (1.1.0+1).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14c
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0+1
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.2.0
+ fetched libfoo/1.1.0+1
+ unpacked libfoo/1.1.0+1
+ configured libfoo/1.1.0+1
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+1 available (1.1.0+1) [1.1.0] [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ # Deorphan libfoo/1.1.0+1 to latest available version (1.1.0).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ :
+ location: $rep/t14b
+ role: prerequisite
+ EOI
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0+1 available (1.1.0+1) [1.1.0] [1.0.0]
+ EOO
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0+1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0 available (1.1.0) [1.0.0]
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.1.0 available (1.1.0) [1.0.0]
+ EOO
+
+ # Deorphan libfoo/1.1.0 to latest available version (1.0.0).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14a
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan ?libfoo;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Deorphan fails (none available).
+ #
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan ?libfoo 2>>/EOE != 0;
+ error: unknown package libfoo
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
+
+ : recursive
+ :
+ {
+ +$tar -xf $src/t2/libbar-1.0.0.tar.gz &libbar-1.0.0/***
+ +mv libbar-1.0.0 libbar
+
+ +cat <<"EOI" >=libbar/repositories.manifest
+ : 1
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ : immediate
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ $rep_remove $~/libbar/;
+ $rep_add $rep/t2 $rep/t4b $rep/t14c && $rep_fetch;
+
+ $* --deorphan --immediate libbar 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ replace/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available 1.1.0 (1.0.0)
+ libfoo configured !1.0.0 available 1.1.0+1 [1.1.0] (1.0.0) 0.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : immediate-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ $rep_add $rep/t2 $rep/t4b $rep/t14c && $rep_fetch;
+
+ # Note that on Windows the local repository canonical name path part
+ # is converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libbar)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ $* --mask-repository $cn --deorphan --immediate libbar 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ replace/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available 1.1.0 (1.0.0)
+ libfoo configured !1.0.0 available [1.2.0] [1.1.1] 1.1.0+1 [1.1.0] (1.0.0) 0.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : recursive
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ $rep_remove $~/libbar/;
+ $rep_add $rep/t2 $rep/t4b $rep/t14c && $rep_fetch;
+
+ $* --deorphan --recursive libbar 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ replace/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available 1.1.0 (1.0.0)
+ libfoo configured !1.0.0 available 1.1.0+1 [1.1.0] (1.0.0) 0.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : deorphan-immediate
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan-immediate libbar 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ reconfigure/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.1 available [1.2.0] (1.1.1)
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : deorphan-recursive
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $rep_add $rep/t3 && $rep_fetch;
+ $* libbaz 2>!;
+
+ $pkg_status -or libbaz >>EOO;
+ !libbaz configured 1.0.0 available (1.0.0)
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0) [1.0.0] [0.1.0]
+ EOO
+
+ cat <<"EOI" >=libbar/repositories.manifest;
+ : 1
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ $rep_fetch;
+
+ $* --deorphan-recursive libbaz 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ reconfigure libbar (dependent of libfoo)
+ reconfigure/update libbaz/1.0.0
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libbaz.+ is up to date%
+ %info: .+libbar.+ is up to date%
+ updated libbaz/1.0.0
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbaz >>EOO;
+ !libbaz configured 1.0.0 available (1.0.0)
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.1 available [1.2.0] (1.1.1) [1.0.0] [0.1.0]
+ EOO
+
+ $pkg_drop libbaz libbar
+ }
+ }
+
+ : recursive-all-held
+ :
+ : As above but uses the 'deorphan all held packages' form.
+ :
+ {
+ +$tar -xf $src/t2/libbar-1.0.0.tar.gz &libbar-1.0.0/***
+ +mv libbar-1.0.0 libbar
+
+ +cat <<"EOI" >=libbar/repositories.manifest
+ : 1
+ :
+ location: $rep/t14b
+ role: prerequisite
+ :
+ location: $rep/t14f
+ role: prerequisite
+ :
+ location: $rep/t14i
+ role: prerequisite
+ EOI
+
+ : immediate
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ $rep_remove $~/libbar/;
+ $rep_add $rep/t2 $rep/t4b $rep/t14c && $rep_fetch;
+
+ $* --deorphan --immediate 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ replace/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available 1.1.0 (1.0.0)
+ libfoo configured !1.0.0 available 1.1.0+1 [1.1.0] (1.0.0) 0.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : recursive
+ :
+ {
+ $clone_root_cfg;
+ cp -rp ../libbar ./;
+
+ $rep_add --type dir libbar/ && $rep_fetch;
+ $* libbar ?libfoo/1.1.0 2>!;
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available (1.0.0)
+ libfoo configured !1.1.0 available [1.2.0] [1.1.1] (1.1.0)
+ EOO
+
+ $rep_remove $~/libbar/;
+ $rep_add $rep/t2 $rep/t4b $rep/t14c && $rep_fetch;
+
+ $* --deorphan --recursive 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ replace/update libbar/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -or libbar >>EOO;
+ !libbar configured 1.0.0 available 1.1.0 (1.0.0)
+ libfoo configured !1.0.0 available 1.1.0+1 [1.1.0] (1.0.0) 0.1.0
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
+
+ : same-version
+ :
+ {
+ : best-match
+ :
+ {
+ $clone_root_cfg;
+
+ $rep_add $rep/t1 && $rep_fetch;
+ $* libfoo 2>!;
+
+ $rep_add $rep/t2 && $rep_fetch;
+ $* libbar ?libfoo 2>!;
+
+ $rep_remove $rep/t1;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ replace/update libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_drop libbar
+ }
+
+ : constrained
+ :
+ {
+ $clone_root_cfg;
+
+ $rep_add $rep/t1 && $rep_fetch;
+ $* libfoo 2>!;
+
+ $rep_add $rep/t2 && $rep_fetch;
+ $* libbar ?libfoo 2>!;
+
+ $rep_remove $rep/t1;
+
+ $* --deorphan ?libfoo/1.0.0 2>>~%EOE%;
+ replace/update libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_drop libbar
+ }
+ }
+ }
+
+ : held
+ :
+ {
+ : basics
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ sed -i -e 's/(version:).+/\1 1.0.0/' libfoo/manifest;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0 available 1.0.0#1
+ EOO
+
+ # Deorphan libfoo/1.0.0 to libfoo/1.0.0#1.
+ #
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.0.0#1
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ %info: .+libfoo.+ is up to date%
+ updated libfoo/1.0.0#1
+ EOE
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0#1 available (1.0.0#1)
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $rep_fetch $rep/t4a $rep/t4c;
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0#1 available 1.1.0 (1.0.0#1) 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.0.0#1 to libfoo/1.0.0.
+ #
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.0.0#1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status libfoo >'!libfoo configured 1.0.0 available 1.1.0';
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status libfoo >'!libfoo configured 1.0.0 available 1.1.0';
+
+ # Deorphan libfoo/1.0.0 to libfoo/1.1.0.
+ #
+ $rep_remove $rep/t4c;
+
+ # While at it, use the 'deorphan all held packages' form.
+ #
+ $* --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ %info: .+libfoo-1.1.0.+ is up to date%
+ updated libfoo/1.1.0
+ EOE
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ $pkg_drop libfoo
+ }
+
+ : basics-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+
+ cp -r $src/libfoo-1.1.0 libfoo;
+ sed -i -e 's/(version:).+/\1 1.0.0/' libfoo/manifest;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ echo "" >+ libfoo/manifest;
+ $rep_fetch;
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0 available 1.0.0#1
+ EOO
+
+ # Deorphan libfoo/1.0.0 to libfoo/1.0.0#1.
+ #
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.0.0#1
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ %info: .+libfoo.+ is up to date%
+ updated libfoo/1.0.0#1
+ EOE
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0#1 available (1.0.0#1)
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $rep_fetch $rep/t4a $rep/t4c;
+
+ $pkg_status -ro libfoo >>EOO;
+ !libfoo configured 1.0.0#1 available 1.1.0 (1.0.0#1) 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.0.0#1 to libfoo/1.0.0.
+ #
+ # Note that on Windows the local repository canonical name path part is
+ # converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libfoo)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ $* --mask-repository $cn --deorphan libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.0.0#1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status libfoo >'!libfoo configured 1.0.0 available 1.1.0 1.0.0#1';
+
+ # Noop.
+ #
+ $* --mask-repository $cn --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status libfoo >'!libfoo configured 1.0.0 available 1.1.0 1.0.0#1';
+
+ # Deorphan libfoo/1.0.0 to libfoo/1.1.0.
+ #
+ # While at it, use the 'deorphan all held packages' form.
+ #
+ $* --mask-repository $cn --mask-repository $rep/t4c --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ %info: .+libfoo-1.1.0.+ is up to date%
+ updated libfoo/1.1.0
+ EOE
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ # Noop.
+ #
+ $* --mask-repository $cn --mask-repository $rep/t4c --deorphan libfoo \
+ 2>'info: nothing to build';
+
+ $pkg_status libfoo >'!libfoo configured 1.1.0';
+
+ $pkg_drop libfoo
+ }
+
+ : preference
+ :
+ {
+ $clone_root_cfg;
+
+ $tar -xf $src/t14d/libfoo-1.1.0+2.tar.gz &libfoo-1.1.0+2/***;
+ mv libfoo-1.1.0+2 libfoo;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $rep_fetch $rep/t14a $rep/t14b $rep/t14c $rep/t14d $rep/t14e $rep/t14f $rep/t14i;
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the exactly same version.
+ #
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/update libfoo/1.1.0+2
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+2
+ unpacked libfoo/1.1.0+2
+ configured libfoo/1.1.0+2
+ %info: .+libfoo-1.1.0\+2.+ is up to date%
+ updated libfoo/1.1.0+2
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the later revision of same version (1.1.0+3).
+ #
+ $rep_remove $rep/t14d;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0+3
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+3
+ unpacked libfoo/1.1.0+3
+ configured libfoo/1.1.0+3
+ %info: .+libfoo-1.1.0\+3.+ is up to date%
+ updated libfoo/1.1.0+3
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+3 to the later patch of same version (1.1.1).
+ #
+ $rep_remove $rep/t14e;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ disfigured libfoo/1.1.0+3
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ %info: .+libfoo-1.1.1.+ is up to date%
+ updated libfoo/1.1.1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.1 to later minor of same version (1.2.0).
+ #
+ $rep_remove $rep/t14f;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/upgrade libfoo/1.2.0
+ disfigured libfoo/1.1.1
+ fetched libfoo/1.2.0
+ unpacked libfoo/1.2.0
+ configured libfoo/1.2.0
+ %info: .+libfoo-1.2.0.+ is up to date%
+ updated libfoo/1.2.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.2.0 to latest available version (1.1.0+1).
+ #
+ $rep_remove $rep/t14i;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0+1
+ disfigured libfoo/1.2.0
+ fetched libfoo/1.1.0+1
+ unpacked libfoo/1.1.0+1
+ configured libfoo/1.1.0+1
+ %info: .+libfoo-1.1.0\+1.+ is up to date%
+ updated libfoo/1.1.0+1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+1 to latest available version (1.1.0).
+ #
+ $rep_remove $rep/t14c;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0
+ disfigured libfoo/1.1.0+1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ %info: .+libfoo-1.1.0.+ is up to date%
+ updated libfoo/1.1.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available (1.1.0) 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available (1.1.0) 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0 to latest available version (1.0.0).
+ #
+ $rep_remove $rep/t14b;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan libfoo 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Deorphan fails (none available).
+ #
+ $rep_remove $rep/t14a;
+
+ $* --deorphan libfoo 2>>/EOE != 0;
+ error: unknown package libfoo
+ info: configuration cfg/ has no repositories
+ info: use 'bpkg rep-add' to add a repository
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : preference-all-held
+ :
+ : As above but uses the 'deorphan all held packages' form.
+ :
+ {
+ $clone_root_cfg;
+
+ $tar -xf $src/t14d/libfoo-1.1.0+2.tar.gz &libfoo-1.1.0+2/***;
+ mv libfoo-1.1.0+2 libfoo;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $rep_fetch $rep/t14a $rep/t14b $rep/t14c $rep/t14d $rep/t14e $rep/t14f $rep/t14i;
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the exactly same version.
+ #
+ $rep_remove $~/libfoo/;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/update libfoo/1.1.0+2
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+2
+ unpacked libfoo/1.1.0+2
+ configured libfoo/1.1.0+2
+ %info: .+libfoo-1.1.0\+2.+ is up to date%
+ updated libfoo/1.1.0+2
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the later revision of same version (1.1.0+3).
+ #
+ $rep_remove $rep/t14d;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0+3
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+3
+ unpacked libfoo/1.1.0+3
+ configured libfoo/1.1.0+3
+ %info: .+libfoo-1.1.0\+3.+ is up to date%
+ updated libfoo/1.1.0+3
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+3 to the later patch of same version (1.1.1).
+ #
+ $rep_remove $rep/t14e;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ disfigured libfoo/1.1.0+3
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ %info: .+libfoo-1.1.1.+ is up to date%
+ updated libfoo/1.1.1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.1 to later minor of same version (1.2.0).
+ #
+ $rep_remove $rep/t14f;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.2.0
+ disfigured libfoo/1.1.1
+ fetched libfoo/1.2.0
+ unpacked libfoo/1.2.0
+ configured libfoo/1.2.0
+ %info: .+libfoo-1.2.0.+ is up to date%
+ updated libfoo/1.2.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.2.0 to latest available version (1.1.0+1).
+ #
+ $rep_remove $rep/t14i;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0+1
+ disfigured libfoo/1.2.0
+ fetched libfoo/1.1.0+1
+ unpacked libfoo/1.1.0+1
+ configured libfoo/1.1.0+1
+ %info: .+libfoo-1.1.0\+1.+ is up to date%
+ updated libfoo/1.1.0+1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+1 to latest available version (1.1.0).
+ #
+ $rep_remove $rep/t14c;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0
+ disfigured libfoo/1.1.0+1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ %info: .+libfoo-1.1.0.+ is up to date%
+ updated libfoo/1.1.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available (1.1.0) 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available (1.1.0) 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0 to latest available version (1.0.0).
+ #
+ $rep_remove $rep/t14b;
+
+ $* --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Noop.
+ #
+ $* --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available (1.0.0)
+ EOO
+
+ # Deorphan fails (none available).
+ #
+ $rep_remove $rep/t14a;
+
+ $* --deorphan 2>>/EOE != 0;
+ error: libfoo is not available
+ info: configuration cfg/ has no repositories
+ info: use 'bpkg rep-add' to add a repository
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : preference-all-held-masked
+ :
+ : As above but using --mask-repository instead of rep-remove.
+ :
+ {
+ $clone_root_cfg;
+
+ $tar -xf $src/t14d/libfoo-1.1.0+2.tar.gz &libfoo-1.1.0+2/***;
+ mv libfoo-1.1.0+2 libfoo;
+
+ $rep_add --type dir libfoo/ && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $rep_fetch $rep/t14a $rep/t14b $rep/t14c $rep/t14d $rep/t14e $rep/t14f $rep/t14i;
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the exactly same version.
+ #
+ # Note that on Windows the local repository canonical name path part is
+ # converted to lower case.
+ #
+ cn = "$canonicalize([dir_path] $~/libfoo)";
+ if! $posix
+ cn = $lcase([string] $cn)
+ end;
+ cn = "dir:$cn";
+
+ mask = --mask-repository $cn;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/update libfoo/1.1.0+2
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+2
+ unpacked libfoo/1.1.0+2
+ configured libfoo/1.1.0+2
+ %info: .+libfoo-1.1.0\+2.+ is up to date%
+ updated libfoo/1.1.0+2
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+2 available 1.2.0 1.1.1 1.1.0+3 (1.1.0+2) 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+2 to the later revision of same version (1.1.0+3).
+ #
+ mask += --mask-repository $rep/t14d;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.0+3
+ disfigured libfoo/1.1.0+2
+ fetched libfoo/1.1.0+3
+ unpacked libfoo/1.1.0+3
+ configured libfoo/1.1.0+3
+ %info: .+libfoo-1.1.0\+3.+ is up to date%
+ updated libfoo/1.1.0+3
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+3 available 1.2.0 1.1.1 (1.1.0+3) 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+3 to the later patch of same version (1.1.1).
+ #
+ mask += --mask-repository $rep/t14e;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.1.1
+ disfigured libfoo/1.1.0+3
+ fetched libfoo/1.1.1
+ unpacked libfoo/1.1.1
+ configured libfoo/1.1.1
+ %info: .+libfoo-1.1.1.+ is up to date%
+ updated libfoo/1.1.1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.1 available 1.2.0 (1.1.1) 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.1 to later minor of same version (1.2.0).
+ #
+ mask += --mask-repository $rep/t14f;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/upgrade libfoo/1.2.0
+ disfigured libfoo/1.1.1
+ fetched libfoo/1.2.0
+ unpacked libfoo/1.2.0
+ configured libfoo/1.2.0
+ %info: .+libfoo-1.2.0.+ is up to date%
+ updated libfoo/1.2.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.2.0 available (1.2.0) 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.2.0 to latest available version (1.1.0+1).
+ #
+ mask += --mask-repository $rep/t14i;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0+1
+ disfigured libfoo/1.2.0
+ fetched libfoo/1.1.0+1
+ unpacked libfoo/1.1.0+1
+ configured libfoo/1.1.0+1
+ %info: .+libfoo-1.1.0\+1.+ is up to date%
+ updated libfoo/1.1.0+1
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0+1 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 (1.1.0+1) 1.1.0 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0+1 to latest available version (1.1.0).
+ #
+ mask += --mask-repository $rep/t14c;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.1.0
+ disfigured libfoo/1.1.0+1
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ configured libfoo/1.1.0
+ %info: .+libfoo-1.1.0.+ is up to date%
+ updated libfoo/1.1.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 (1.1.0) 1.0.0
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.1.0 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 (1.1.0) 1.0.0
+ EOO
+
+ # Deorphan libfoo/1.1.0 to latest available version (1.0.0).
+ #
+ mask += --mask-repository $rep/t14b;
+
+ $* $mask --deorphan 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.1.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 (1.0.0)
+ EOO
+
+ # Noop.
+ #
+ $* $mask --deorphan 2>'info: nothing to build';
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 (1.0.0)
+ EOO
+
+ # Deorphan fails (none available).
+ #
+ mask += --mask-repository $rep/t14a;
+
+ $* $mask --deorphan 2>>/EOE != 0;
+ error: libfoo is not available
+ EOE
+
+ $pkg_status -o libfoo >>EOO;
+ !libfoo configured 1.0.0 available 1.2.0 1.1.1 1.1.0+3 1.1.0+2 1.1.0+1 1.1.0 (1.0.0)
+ EOO
+
+ $pkg_drop libfoo
+ }
+ }
+}
+
+: compatibility
+:
+{
+ +$clone_cfg
+ +$rep_add $rep/t15 && $rep_fetch
+
+ test.arguments += --yes
+
+ : toolchain
+ :
+ {
+ +$clone_cfg
+
+ : build-satisfied
+ :
+ {
+ $clone_cfg;
+
+ $* libfoo libbar 2>>~%EOE%;
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_drop libfoo libbar
+ }
+
+ : build-fail-unsatisfied
+ :
+ {
+ $clone_cfg;
+
+ $* libbaz 2>>~%EOE% != 0
+ error: unable to satisfy constraint (build2 >= 65536.0.0) for package libbaz
+ % info: available build2 version is .+%
+ info: while satisfying libbaz/1.0.0
+ EOE
+ }
+
+ : build-fail-unsatisfied-dependency
+ :
+ {
+ $clone_cfg;
+
+ $* libbiz 2>>~%EOE% != 0
+ error: unable to satisfy constraint (build2 >= 65536.0.0) for package libbaz
+ % info: available build2 version is .+%
+ info: while satisfying libbaz/1.0.0
+ info: while satisfying libbiz/1.0.0
+ EOE
+ }
+ }
+}
+
+: existing-package
+:
+{
+ +$tar -xf $src/libbar-1.2.0.tar.gz &libbar-1.2.0/***
+ +$tar -xf $src/libbar-1.1.0.tar.gz &libbar-1.1.0/***
+ +$tar -xf $src/libbar-1.0.0.tar.gz &libbar-1.0.0/***
+ +$tar -xf $src/libbar-0.0.3.tar.gz &libbar-0.0.3/***
+ +$tar -xf $src/libbaz-1.1.0.tar.gz &libbaz-1.1.0/***
+ +$tar -xf $src/libfoo-1.1.0.tar.gz &libfoo-1.1.0/***
+ +$tar -xf $src/libfoo-1.0.0.tar.gz &libfoo-1.0.0/***
+
+ d = [dir_path] ../../../
+
+ test.arguments += --yes
+
+ : hold
+ :
+ {
+ : archive
+ :
+ {
+ : pick-archive
+ :
+ : Test that libbar/1.0.0 specified as an archive is picked as a
+ : dependency for libbaz, despite the fact the repository contains
+ : libbar/1.2.0.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ $src/libfoo-1.0.0.tar.gz \
+ $src/libbar-1.0.0.tar.gz 2>>~%EOE%;
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0 available 1.2.0
+ !libfoo configured !1.0.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz libfoo libbar
+ }
+
+ : pick-repo
+ :
+ : Picks the libbar/1.2.0 dependency from the repository for the
+ : dependent libbaz/1.1.0 specified as an archive.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz $src/libfoo-1.0.0.tar.gz 2>>~%EOE%;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libbar/1.2.0
+ configured libfoo/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz libfoo libbar
+ }
+
+ : unsatisfactory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4b && $rep_fetch;
+
+ $* libbar $src/libfoo-1.0.0.tar.gz 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unsatisfactory-archive
+ :
+ : Same as above but the dependent is specified as an archive.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a && $rep_fetch; # Note: libfoo/1.1.0 belongs to t4a.
+
+ $* $src/libbar-1.1.0.tar.gz $src/libfoo-1.0.0.tar.gz 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ command line requires (libbar == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : dependency-alternative
+ :
+ : Note: by specifying an unsatisfactory dependency alternative as an
+ : archive we resolve the alternatives ambiguity here, building both
+ : libbar and libbaz packages as a result.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t8a && $rep_fetch;
+
+ $* fox 2>>EOE != 0;
+ error: unable to select dependency alternative for package fox/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar
+ info: alternative: libbaz
+ info: while satisfying fox/1.0.0
+ EOE
+
+ $* fox $src/libbar-0.0.3.tar.gz 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ fetched libbar/0.0.3
+ unpacked libbar/0.0.3
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ configured libbar/0.0.3
+ %info: .+fox-1.0.0.+ is up to date%
+ %info: .+libbar-0.0.3.+ is up to date%
+ updated fox/1.0.0
+ updated libbar/0.0.3
+ EOE
+
+ $pkg_status -r fox libbar >>EOO;
+ !fox configured 1.0.0
+ libbaz configured 1.1.0
+ !libbar configured !0.0.3 available 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_drop fox libbar
+ }
+
+ : upgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ $src/libfoo-1.0.0.tar.gz \
+ $src/libbar-1.0.0.tar.gz 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0 available 1.1.0
+ !libfoo configured !1.0.0 available 1.1.0
+ !libfoo configured !1.0.0 available 1.1.0
+ EOO
+
+ $* --upgrade-recursive libbaz 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.1.0
+ !libbar configured !1.1.0
+ !libfoo configured !1.1.0
+ !libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : downgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* libbar 2>!;
+
+ $* $src/libbar-1.0.0.tar.gz "?$src/libfoo-1.0.0.tar.gz" 2>>~%EOE%;
+ disfigured libbar/1.2.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : replace
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $* $src/libfoo-1.0.0.tar.gz 2>>~%EOE%;
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" $src/libfoo-1.0.0.tar.gz 2>>~%EOE%;
+ replace/update libfoo/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" $d/libfoo-1.0.0/ 2>>~%EOE%;
+ replace/upgrade libfoo/1.0.0#1
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ %info: .+libfoo.+ is up to date%
+ updated libfoo/1.0.0#1
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0#1
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : deorphan-existing-archive
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ $src/libfoo-1.0.0.tar.gz \
+ $src/libbar-1.0.0.tar.gz 2>!;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* --deorphan 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz libfoo >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : deorphan-with-existing-archive
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 $rep/t3 && $rep_fetch;
+
+ $* libbaz libbar 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $rep_remove $rep/t2 $rep/t3;
+
+ $* --deorphan $src/libfoo-1.0.0.tar.gz 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $* --deorphan $src/libbar-1.2.0.tar.gz 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ configured libbar/1.2.0
+ configured libbaz/1.0.0
+ %info: .+libbar-1.2.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.2.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz libfoo >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured !1.2.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : system
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* libbaz '?sys:libbar' 2>!;
+
+ $* $src/libbar-1.1.0.tar.gz "?$src/libfoo-1.1.0.tar.gz" 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ purged libbar/*
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.0.0
+ %info: .+libbar-1.1.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.1.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz libbar
+ }
+ }
+
+ : directory
+ :
+ {
+ : pick-directory
+ :
+ : Test that libbar/1.0.0 specified as a directory is picked as a
+ : dependency for libbaz, despite the fact that the repository contains
+ : libbar/1.2.0.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ $d/libfoo-1.0.0/ \
+ $d/libbar-1.0.0/ 2>>~%EOE%;
+ using libfoo/1.0.0 (external)
+ using libbar/1.0.0 (external)
+ using libbaz/1.1.0 (external)
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo.+ is up to date%
+ %info: .+libbar.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0 available 1.2.0
+ !libfoo configured !1.0.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz libfoo libbar
+ }
+
+ : pick-repo
+ :
+ : Picks the libbar/1.2.0 dependency from the repository for the
+ : dependent libbaz/1.1.0 specified as a directory.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ $d/libfoo-1.0.0/ 2>>~%EOE%;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ using libfoo/1.0.0 (external)
+ using libbaz/1.1.0 (external)
+ configured libbar/1.2.0
+ configured libfoo/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz libfoo libbar
+ }
+
+ : unsatisfactory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4b && $rep_fetch;
+
+ $* libbar $d/libfoo-1.0.0/ 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : unsatisfactory-directory
+ :
+ : Same as above but the dependent is specified as a directory.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a && $rep_fetch; # Note: libfoo/1.1.0 belongs to t4a.
+
+ $* $d/libbar-1.1.0/ $d/libfoo-1.0.0/ 2>>~%EOE% != 0
+ error: unable to satisfy constraints on package libfoo
+ info: command line depends on (libfoo == 1.0.0)
+ info: libbar/1.1.0 depends on (libfoo == 1.1.0)
+ command line requires (libbar == 1.1.0)
+ info: available libfoo/1.0.0
+ info: available libfoo/1.1.0
+ info: while satisfying libbar/1.1.0
+ info: explicitly specify libfoo version to manually satisfy both constraints
+ EOE
+ }
+
+ : dependency-alternative
+ :
+ : Note: by specifying an unsatisfactory dependency alternative as a
+ : directory we resolve the alternatives ambiguity here, building both
+ : libbar and libbaz packages as a result.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t8a && $rep_fetch;
+
+ $* fox 2>>EOE != 0;
+ error: unable to select dependency alternative for package fox/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar
+ info: alternative: libbaz
+ info: while satisfying fox/1.0.0
+ EOE
+
+ $* fox $d/libbar-0.0.3/ 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ using libbar/0.0.3 (external)
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ configured libbar/0.0.3
+ %info: .+fox-1.0.0.+ is up to date%
+ %info: .+libbar.+ is up to date%
+ updated fox/1.0.0
+ updated libbar/0.0.3
+ EOE
+
+ $pkg_status -r fox libbar >>EOO;
+ !fox configured 1.0.0
+ libbaz configured 1.1.0
+ !libbar configured !0.0.3 available 1.0.0
+ libbaz configured 1.1.0
+ EOO
+
+ $pkg_drop fox libbar
+ }
+
+ : upgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ $d/libfoo-1.0.0/ \
+ $d/libbar-1.0.0/ 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0 available 1.1.0
+ !libfoo configured !1.0.0 available 1.1.0
+ !libfoo configured !1.0.0 available 1.1.0
+ EOO
+
+ $* --upgrade-recursive libbaz 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.1.0
+ !libbar configured !1.1.0
+ !libfoo configured !1.1.0
+ !libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : downgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* libbar 2>!;
+
+ $* $d/libbar-1.0.0/ "?$d/libfoo-1.0.0/" 2>>~%EOE%;
+ disfigured libbar/1.2.0
+ using libfoo/1.0.0 (external)
+ using libbar/1.0.0 (external)
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ %info: .+libbar.+ is up to date%
+ updated libbar/1.0.0
+ EOE
+
+ $pkg_status -r libbar >>EOO;
+ !libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
+
+ : replace
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* libfoo 2>!;
+
+ $* $d/libfoo-1.0.0/ 2>>~%EOE%;
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ %info: .+libfoo.+ is up to date%
+ updated libfoo/1.0.0#1
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0#1
+ EOO
+
+ $* --plan "" $d/libfoo-1.0.0/ 2>>~%EOE%;
+ replace/update libfoo/1.0.0#1
+ disfigured libfoo/1.0.0#1
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ %info: .+libfoo.+ is up to date%
+ updated libfoo/1.0.0#1
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0#1
+ EOO
+
+ $* --plan "" $src/libfoo-1.0.0.tar.gz 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ disfigured libfoo/1.0.0#1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ updated libfoo/1.0.0
+ EOE
+
+ $pkg_status -r libfoo >>EOO;
+ !libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libfoo
+ }
+
+ : deorphan-existing-directory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ $d/libfoo-1.0.0/ \
+ $d/libbar-1.0.0/ 2>!;
+
+ $* --deorphan libfoo 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libfoo-1.0.0.+ is up to date%
+ %info: .+libbar.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libfoo/1.0.0
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ !libbar configured !1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* --deorphan 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz libfoo >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured 1.0.0
+ !libfoo configured 1.0.0
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : deorphan-with-existing-directory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 $rep/t3 && $rep_fetch;
+
+ $* libbaz libbar 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ libfoo configured 1.0.0
+ EOO
+
+ $rep_remove $rep/t2 $rep/t3;
+
+ $* --deorphan $d/libfoo-1.0.0/ 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libfoo.+ is up to date%
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libfoo/1.0.0#1
+ updated libbar/1.0.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured 1.0.0
+ !libfoo configured !1.0.0#1
+ EOO
+
+ $* --deorphan $d/libbar-1.2.0/ 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ disfigured libbar/1.0.0
+ using libbar/1.2.0 (external)
+ configured libbar/1.2.0
+ configured libbaz/1.0.0
+ %info: .+libbar.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.2.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz libfoo >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured !1.2.0
+ !libfoo configured !1.0.0#1
+ EOO
+
+ $pkg_drop libbaz libbar libfoo
+ }
+
+ : system
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* libbaz '?sys:libbar' 2>!;
+
+ $* $d/libbar-1.1.0/ "?$d/libfoo-1.1.0/" 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ purged libbar/*
+ using libfoo/1.1.0 (external)
+ using libbar/1.1.0 (external)
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.0.0
+ %info: .+libbar.+ is up to date%
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbar/1.1.0
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ !libbar configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz libbar
+ }
+ }
+ }
+
+ : dependency
+ :
+ {
+ : archive
+ :
+ {
+ : pick-archive
+ :
+ : Test that libbar/1.0.0 specified as an archive is picked as a
+ : dependency for libbaz, despite the fact that the repository contains
+ : libbar/1.2.0.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ "?$src/libfoo-1.0.0.tar.gz" \
+ "?$src/libbar-1.0.0.tar.gz" 2>>~%EOE%;
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : unsatisfactory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4b && $rep_fetch;
+
+ $* libbar "?$src/libfoo-1.0.0.tar.gz" 2>>EOE != 0
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/1.0.0 doesn't satisfy libbar/1.1.0
+ EOE
+ }
+
+ : unsatisfactory-archive
+ :
+ : Same as above but the dependent is specified as an archive.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a && $rep_fetch;
+
+ $* $src/libbar-1.1.0.tar.gz "?$src/libfoo-1.0.0.tar.gz" 2>>EOE != 0
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/1.0.0 doesn't satisfy libbar/1.1.0
+ EOE
+ }
+
+ : dependency-alternative
+ :
+ : Note: by specifying an unsatisfactory dependency alternative as an
+ : archive we resolve the alternatives ambiguity here, building libbaz and
+ : skipping unused libbar as a result.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t8a && $rep_fetch;
+
+ $* fox 2>>EOE != 0;
+ error: unable to select dependency alternative for package fox/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar
+ info: alternative: libbaz
+ info: while satisfying fox/1.0.0
+ EOE
+
+ $* fox "?$src/libbar-0.0.3.tar.gz" 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $pkg_status -r fox libbar >>EOO;
+ !fox configured 1.0.0
+ libbaz configured 1.1.0
+ libbar available 1.0.0
+ EOO
+
+ $pkg_drop fox
+ }
+
+ : upgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ "?$src/libfoo-1.0.0.tar.gz" \
+ "?$src/libbar-1.0.0.tar.gz" 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.1.0
+ libfoo configured !1.0.0 available 1.1.0
+ libfoo configured !1.0.0 available 1.1.0
+ EOO
+
+ $* --upgrade-recursive libbaz 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.1.0
+ libbar configured !1.1.0
+ libfoo configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : downgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz "?$src/libfoo-1.0.0.tar.gz" 2>>~%EOE%;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ configured libbar/1.2.0
+ configured libfoo/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" "?$src/libbar-1.0.0.tar.gz" 2>>~%EOE%;
+ replace/downgrade libbar/1.0.0
+ reconfigure libbaz (dependent of libbar)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : replace
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz 2>!;
+
+ $* --plan "" "?$src/libfoo-1.0.0.tar.gz" 2>>~%EOE%;
+ replace/update libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" "?$src/libfoo-1.0.0.tar.gz" 2>>~%EOE%;
+ replace/update libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" "?$d/libfoo-1.0.0/" 2>>~%EOE%;
+ replace/upgrade libfoo/1.0.0#1
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0#1
+ libfoo configured !1.0.0#1
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : deorphan-existing-archive
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz \
+ "?$src/libfoo-1.0.0.tar.gz" \
+ "?$src/libbar-1.0.0.tar.gz" 2>!;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* --deorphan libbaz ?libbar 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_drop libbaz
+ }
+
+ : deorphan-with-existing-archive
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $src/libbaz-1.1.0.tar.gz "?$src/libfoo-1.0.0.tar.gz" 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $rep_remove $rep/t5;
+
+ $* --deorphan "?$src/libbar-1.2.0.tar.gz" 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ configured libbar/1.2.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $* --deorphan "?$src/libbar-1.0.0.tar.gz" 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz-1.1.0.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : system
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* libbaz '?sys:libbar' 2>!;
+
+ $* "?$src/libbar-1.1.0.tar.gz" "?$src/libfoo-1.1.0.tar.gz" 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ purged libbar/*
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.0.0
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ libbar configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+ }
+
+ : directory
+ :
+ {
+ : pick-directory
+ :
+ : Test that libbar/1.0.0 specified as a directory is picked as a
+ : dependency for libbaz, despite the fact that the repository contains
+ : libbar/1.2.0.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ "?$d/libfoo-1.0.0/" \
+ "?$d/libbar-1.0.0/" 2>>~%EOE%;
+ using libfoo/1.0.0 (external)
+ using libbar/1.0.0 (external)
+ using libbaz/1.1.0 (external)
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : unsatisfactory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4b && $rep_fetch;
+
+ $* libbar "?$d/libfoo-1.0.0/" 2>>EOE != 0
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/1.0.0 doesn't satisfy libbar/1.1.0
+ EOE
+ }
+
+ : unsatisfactory-directory
+ :
+ : Same as above but the dependent is specified as a directory.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a && $rep_fetch;
+
+ $* $d/libbar-1.1.0/ "?$d/libfoo-1.0.0/" 2>>EOE != 0
+ error: package libfoo doesn't satisfy its dependents
+ info: libfoo/1.0.0 doesn't satisfy libbar/1.1.0
+ EOE
+ }
+
+ : dependency-alternative
+ :
+ : Note: by specifying an unsatisfactory dependency alternative as a
+ : directory we resolve the alternatives ambiguity here, building libbaz
+ : and skipping unused libbar as a result.
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t8a && $rep_fetch;
+
+ $* fox 2>>EOE != 0;
+ error: unable to select dependency alternative for package fox/1.0.0
+ info: explicitly specify dependency packages to manually select the alternative
+ info: alternative: libbar
+ info: alternative: libbaz
+ info: while satisfying fox/1.0.0
+ EOE
+
+ $* fox "?$d/libbar-0.0.3/" 2>>~%EOE%;
+ fetched libbaz/1.1.0
+ unpacked libbaz/1.1.0
+ fetched fox/1.0.0
+ unpacked fox/1.0.0
+ configured libbaz/1.1.0
+ configured fox/1.0.0
+ %info: .+fox-1.0.0.+ is up to date%
+ updated fox/1.0.0
+ EOE
+
+ $pkg_status -r fox libbar >>EOO;
+ !fox configured 1.0.0
+ libbaz configured 1.1.0
+ libbar available 1.0.0
+ EOO
+
+ $pkg_drop fox
+ }
+
+ : upgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t4a $rep/t4b && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ "?$d/libfoo-1.0.0/" \
+ "?$d/libbar-1.0.0/" 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.1.0
+ libfoo configured !1.0.0 available 1.1.0
+ libfoo configured !1.0.0 available 1.1.0
+ EOO
+
+ $* --upgrade-recursive libbaz 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.1.0
+ unpacked libfoo/1.1.0
+ fetched libbar/1.1.0
+ unpacked libbar/1.1.0
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.1.0
+ libbar configured !1.1.0
+ libfoo configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : downgrade
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ "?$d/libfoo-1.0.0/" 2>>~%EOE%;
+ fetched libbar/1.2.0
+ unpacked libbar/1.2.0
+ using libfoo/1.0.0 (external)
+ using libbaz/1.1.0 (external)
+ configured libbar/1.2.0
+ configured libfoo/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $* --plan "" "?$d/libbar-1.0.0/" 2>>~%EOE%;
+ replace/downgrade libbar/1.0.0
+ reconfigure libbaz (dependent of libbar)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0
+ using libbar/1.0.0 (external)
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0 available 1.2.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : replace
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ 2>!;
+
+ $* --plan "" "?$d/libfoo-1.0.0/" 2>>~%EOE%;
+ replace/upgrade libfoo/1.0.0#1
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0#1
+ libfoo configured !1.0.0#1
+ EOO
+
+ $* --plan "" "?$d/libfoo-1.0.0/" 2>>~%EOE%;
+ replace/update libfoo/1.0.0#1
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0#1
+ using libfoo/1.0.0#1 (external)
+ configured libfoo/1.0.0#1
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0#1
+ libfoo configured !1.0.0#1
+ EOO
+
+ $* --plan "" "?$src/libfoo-1.0.0.tar.gz" 2>>~%EOE%;
+ replace/downgrade libfoo/1.0.0
+ reconfigure libbar (dependent of libfoo)
+ reconfigure libbaz (dependent of libbar, libfoo)
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0#1
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar-1.0.0.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : deorphan-existing-directory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t2 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ \
+ "?$d/libfoo-1.0.0/" \
+ "?$d/libbar-1.0.0/" 2>!;
+
+ $* --deorphan ?libfoo 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ disfigured libfoo/1.0.0
+ fetched libfoo/1.0.0
+ unpacked libfoo/1.0.0
+ configured libfoo/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbar.+ is up to date%
+ %info: .+libbaz.+ is up to date%
+ updated libbar/1.0.0
+ updated libbaz/1.1.0
+ EOE
+
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* --deorphan libbaz ?libbar 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.0.0
+ fetched libbar/1.0.0
+ unpacked libbar/1.0.0
+ fetched libbaz/1.0.0
+ unpacked libbaz/1.0.0
+ configured libbar/1.0.0
+ configured libbaz/1.0.0
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_drop libbaz
+ }
+
+ : deorphan-with-existing-directory
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t5 && $rep_fetch;
+
+ $* $d/libbaz-1.1.0/ "?$d/libfoo-1.0.0/" 2>!;
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured 1.2.0
+ libfoo configured !1.0.0
+ EOO
+
+ $rep_remove $rep/t5;
+
+ $* --deorphan "?$d/libbar-1.2.0/" 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0
+ using libbar/1.2.0#1 (external)
+ configured libbar/1.2.0#1
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.2.0#1
+ libfoo configured !1.0.0
+ EOO
+
+ # Suppress the 'dropping no longer used variable config.bin.exe.lib'
+ # and alike warnings.
+ #
+ rm cfg/libbar/build/config.build;
+
+ $* --deorphan "?$d/libbar-1.0.0/" 2>>~%EOE%;
+ disfigured libbaz/1.1.0
+ disfigured libbar/1.2.0#1
+ using libbar/1.0.0 (external)
+ configured libbar/1.0.0
+ configured libbaz/1.1.0
+ %info: .+libbaz.+ is up to date%
+ updated libbaz/1.1.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured !1.1.0
+ libbar configured !1.0.0
+ libfoo configured !1.0.0
+ libfoo configured !1.0.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+
+ : system
+ :
+ {
+ $clone_root_cfg;
+ $rep_add $rep/t3 && $rep_fetch;
+
+ $* libbaz '?sys:libbar' 2>!;
+
+ $* "?$d/libbar-1.1.0/" "?$d/libfoo-1.1.0/" 2>>~%EOE%;
+ disfigured libbaz/1.0.0
+ purged libbar/*
+ using libfoo/1.1.0 (external)
+ using libbar/1.1.0 (external)
+ configured libfoo/1.1.0
+ configured libbar/1.1.0
+ configured libbaz/1.0.0
+ %info: .+libbaz-1.0.0.+ is up to date%
+ updated libbaz/1.0.0
+ EOE
+
+ $pkg_status -r libbaz >>EOO;
+ !libbaz configured 1.0.0
+ libbar configured !1.1.0
+ libfoo configured !1.1.0
+ EOO
+
+ $pkg_drop libbaz
+ }
+ }
}
}
diff --git a/tests/pkg-build/libbar-0.0.3.tar.gz b/tests/pkg-build/libbar-0.0.3.tar.gz
new file mode 120000
index 0000000..308e978
--- /dev/null
+++ b/tests/pkg-build/libbar-0.0.3.tar.gz
@@ -0,0 +1 @@
+../common/satisfy/libbar-0.0.3.tar.gz \ No newline at end of file
diff --git a/tests/pkg-build/libbar-1.1.0.tar.gz b/tests/pkg-build/libbar-1.1.0.tar.gz
new file mode 120000
index 0000000..12ae746
--- /dev/null
+++ b/tests/pkg-build/libbar-1.1.0.tar.gz
@@ -0,0 +1 @@
+../common/satisfy/libbar-1.1.0.tar.gz \ No newline at end of file
diff --git a/tests/pkg-build/libbar-1.2.0.tar.gz b/tests/pkg-build/libbar-1.2.0.tar.gz
new file mode 120000
index 0000000..3e4eff9
--- /dev/null
+++ b/tests/pkg-build/libbar-1.2.0.tar.gz
@@ -0,0 +1 @@
+../common/satisfy/libbar-1.2.0.tar.gz \ No newline at end of file
diff --git a/tests/pkg-build/t10 b/tests/pkg-build/t10
new file mode 120000
index 0000000..0208f77
--- /dev/null
+++ b/tests/pkg-build/t10
@@ -0,0 +1 @@
+../common/satisfy/t10 \ No newline at end of file
diff --git a/tests/pkg-build/t11a b/tests/pkg-build/t11a
new file mode 120000
index 0000000..4f78412
--- /dev/null
+++ b/tests/pkg-build/t11a
@@ -0,0 +1 @@
+../common/dependency-alternatives/t11a \ No newline at end of file
diff --git a/tests/pkg-build/t12a b/tests/pkg-build/t12a
new file mode 120000
index 0000000..d421f92
--- /dev/null
+++ b/tests/pkg-build/t12a
@@ -0,0 +1 @@
+../common/satisfy/t12a/ \ No newline at end of file
diff --git a/tests/pkg-build/t12b b/tests/pkg-build/t12b
new file mode 120000
index 0000000..0fbba8a
--- /dev/null
+++ b/tests/pkg-build/t12b
@@ -0,0 +1 @@
+../common/satisfy/t12b/ \ No newline at end of file
diff --git a/tests/pkg-build/t13a b/tests/pkg-build/t13a
new file mode 120000
index 0000000..9d8fb23
--- /dev/null
+++ b/tests/pkg-build/t13a
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13a/ \ No newline at end of file
diff --git a/tests/pkg-build/t13b b/tests/pkg-build/t13b
new file mode 120000
index 0000000..d17701b
--- /dev/null
+++ b/tests/pkg-build/t13b
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13b/ \ No newline at end of file
diff --git a/tests/pkg-build/t13c b/tests/pkg-build/t13c
new file mode 120000
index 0000000..1c534d0
--- /dev/null
+++ b/tests/pkg-build/t13c
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13c/ \ No newline at end of file
diff --git a/tests/pkg-build/t13d b/tests/pkg-build/t13d
new file mode 120000
index 0000000..6933497
--- /dev/null
+++ b/tests/pkg-build/t13d
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13d/ \ No newline at end of file
diff --git a/tests/pkg-build/t13e b/tests/pkg-build/t13e
new file mode 120000
index 0000000..d8d84cc
--- /dev/null
+++ b/tests/pkg-build/t13e
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13e/ \ No newline at end of file
diff --git a/tests/pkg-build/t13f b/tests/pkg-build/t13f
new file mode 120000
index 0000000..bf556bc
--- /dev/null
+++ b/tests/pkg-build/t13f
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13f/ \ No newline at end of file
diff --git a/tests/pkg-build/t13g b/tests/pkg-build/t13g
new file mode 120000
index 0000000..4dc8eb4
--- /dev/null
+++ b/tests/pkg-build/t13g
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13g/ \ No newline at end of file
diff --git a/tests/pkg-build/t13h b/tests/pkg-build/t13h
new file mode 120000
index 0000000..f99413a
--- /dev/null
+++ b/tests/pkg-build/t13h
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13h/ \ No newline at end of file
diff --git a/tests/pkg-build/t13i b/tests/pkg-build/t13i
new file mode 120000
index 0000000..bba4fd3
--- /dev/null
+++ b/tests/pkg-build/t13i
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13i/ \ No newline at end of file
diff --git a/tests/pkg-build/t13j b/tests/pkg-build/t13j
new file mode 120000
index 0000000..da120da
--- /dev/null
+++ b/tests/pkg-build/t13j
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13j/ \ No newline at end of file
diff --git a/tests/pkg-build/t13k b/tests/pkg-build/t13k
new file mode 120000
index 0000000..b1e5a14
--- /dev/null
+++ b/tests/pkg-build/t13k
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13k/ \ No newline at end of file
diff --git a/tests/pkg-build/t13l b/tests/pkg-build/t13l
new file mode 120000
index 0000000..40d9561
--- /dev/null
+++ b/tests/pkg-build/t13l
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13l/ \ No newline at end of file
diff --git a/tests/pkg-build/t13m b/tests/pkg-build/t13m
new file mode 120000
index 0000000..0154455
--- /dev/null
+++ b/tests/pkg-build/t13m
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13m/ \ No newline at end of file
diff --git a/tests/pkg-build/t13n b/tests/pkg-build/t13n
new file mode 120000
index 0000000..1ed57ca
--- /dev/null
+++ b/tests/pkg-build/t13n
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13n/ \ No newline at end of file
diff --git a/tests/pkg-build/t13o b/tests/pkg-build/t13o
new file mode 120000
index 0000000..9516f8f
--- /dev/null
+++ b/tests/pkg-build/t13o
@@ -0,0 +1 @@
+../common/dependency-alternatives/t13o/ \ No newline at end of file
diff --git a/tests/pkg-build/t14a b/tests/pkg-build/t14a
new file mode 120000
index 0000000..34b7111
--- /dev/null
+++ b/tests/pkg-build/t14a
@@ -0,0 +1 @@
+../common/satisfy/t14a \ No newline at end of file
diff --git a/tests/pkg-build/t14b b/tests/pkg-build/t14b
new file mode 120000
index 0000000..eeff0af
--- /dev/null
+++ b/tests/pkg-build/t14b
@@ -0,0 +1 @@
+../common/satisfy/t14b \ No newline at end of file
diff --git a/tests/pkg-build/t14c b/tests/pkg-build/t14c
new file mode 120000
index 0000000..01ab194
--- /dev/null
+++ b/tests/pkg-build/t14c
@@ -0,0 +1 @@
+../common/satisfy/t14c \ No newline at end of file
diff --git a/tests/pkg-build/t14d b/tests/pkg-build/t14d
new file mode 120000
index 0000000..463084d
--- /dev/null
+++ b/tests/pkg-build/t14d
@@ -0,0 +1 @@
+../common/satisfy/t14d \ No newline at end of file
diff --git a/tests/pkg-build/t14e b/tests/pkg-build/t14e
new file mode 120000
index 0000000..a9f72b7
--- /dev/null
+++ b/tests/pkg-build/t14e
@@ -0,0 +1 @@
+../common/satisfy/t14e \ No newline at end of file
diff --git a/tests/pkg-build/t14f b/tests/pkg-build/t14f
new file mode 120000
index 0000000..94c4598
--- /dev/null
+++ b/tests/pkg-build/t14f
@@ -0,0 +1 @@
+../common/satisfy/t14f \ No newline at end of file
diff --git a/tests/pkg-build/t14i b/tests/pkg-build/t14i
new file mode 120000
index 0000000..bcc36b2
--- /dev/null
+++ b/tests/pkg-build/t14i
@@ -0,0 +1 @@
+../common/satisfy/t14i \ No newline at end of file
diff --git a/tests/pkg-build/t15 b/tests/pkg-build/t15
new file mode 120000
index 0000000..c7ad857
--- /dev/null
+++ b/tests/pkg-build/t15
@@ -0,0 +1 @@
+../common/compatibility/t15 \ No newline at end of file
diff --git a/tests/pkg-build/t4f b/tests/pkg-build/t4f
new file mode 120000
index 0000000..00f2c86
--- /dev/null
+++ b/tests/pkg-build/t4f
@@ -0,0 +1 @@
+../common/satisfy/t4f \ No newline at end of file
diff --git a/tests/pkg-build/t4i b/tests/pkg-build/t4i
new file mode 120000
index 0000000..41e500e
--- /dev/null
+++ b/tests/pkg-build/t4i
@@ -0,0 +1 @@
+../common/satisfy/t4i \ No newline at end of file
diff --git a/tests/pkg-build/t4j b/tests/pkg-build/t4j
new file mode 120000
index 0000000..3e18229
--- /dev/null
+++ b/tests/pkg-build/t4j
@@ -0,0 +1 @@
+../common/satisfy/t4j \ No newline at end of file
diff --git a/tests/pkg-build/t4k b/tests/pkg-build/t4k
new file mode 120000
index 0000000..a4fa90c
--- /dev/null
+++ b/tests/pkg-build/t4k
@@ -0,0 +1 @@
+../common/satisfy/t4k \ No newline at end of file
diff --git a/tests/pkg-build/t7a b/tests/pkg-build/t7a
new file mode 120000
index 0000000..d02b5d4
--- /dev/null
+++ b/tests/pkg-build/t7a
@@ -0,0 +1 @@
+../common/linked/t7a \ No newline at end of file
diff --git a/tests/pkg-build/t7b b/tests/pkg-build/t7b
new file mode 120000
index 0000000..808039d
--- /dev/null
+++ b/tests/pkg-build/t7b
@@ -0,0 +1 @@
+../common/linked/t7b \ No newline at end of file
diff --git a/tests/pkg-build/t8a b/tests/pkg-build/t8a
new file mode 120000
index 0000000..8fa2bda
--- /dev/null
+++ b/tests/pkg-build/t8a
@@ -0,0 +1 @@
+../common/dependency-alternatives/t8a/ \ No newline at end of file
diff --git a/tests/pkg-build/t9 b/tests/pkg-build/t9
new file mode 120000
index 0000000..c8c9d9e
--- /dev/null
+++ b/tests/pkg-build/t9
@@ -0,0 +1 @@
+../common/satisfy/t9 \ No newline at end of file
diff --git a/tests/pkg-checkout.testscript b/tests/pkg-checkout.testscript
index 8f3ff92..85fe5e3 100644
--- a/tests/pkg-checkout.testscript
+++ b/tests/pkg-checkout.testscript
@@ -72,7 +72,9 @@ else
$pkg_status style-basic | sed -n -e 's/style-basic available \[.+\] ([^ ]+)/\1/p' | set v;
- $* "style-basic/$v" 2>>"EOE";
+ $* "style-basic/$v" 2>>~"%EOE%";
+ verifying symlinks...
+ %fixing up symlinks...%?
distributing style-basic/$v
checked out style-basic/$v
EOE
@@ -151,6 +153,8 @@ else
$* links/0.0.1 2>>~%EOE%;
checking out links/0.0.1
+ verifying symlinks...
+ %fixing up symlinks...%?
distributing links/0.0.1
checked out links/0.0.1
EOE
@@ -170,17 +174,25 @@ else
#
$rep_fetch "$rep/links.git#v1.0.1";
+ # Note that on POSIX the repository is restored in its permanent location,
+ # since the operation fails in the distribution phase. This is in contrast
+ # to Windows where the repository is lost, since the operation fails in
+ # the fix-up phase.
+ #
if $posix
$* links/1.0.1 2>>~%EOE% != 0
checking out links/1.0.1
+ verifying symlinks...
+ %fixing up symlinks...%?
distributing links/1.0.1
+ %warning: skipping dangling symlink .+%
%error: unable to stat .+%
- warning: repository state is now broken
- info: run 'bpkg rep-fetch' to repair
EOE
else
$* links/1.0.1 2>>~%EOE% != 0
checking out links/1.0.1
+ verifying symlinks...
+ %fixing up symlinks...%?
error: target 'bl' for symlink 'lc' does not exist
info: re-run with -v for more information
warning: repository state is now broken
@@ -190,14 +202,14 @@ else
# Cyclic symlinks in the repository.
#
+ $rep_fetch "$rep/links.git#v1.0.2";
+
if $posix
- $rep_fetch "$rep/links.git#v1.0.2" 2>>~%EOE% != 0
+ $* links/1.0.2 2>>~%EOE% != 0
%.*
%error: unable to iterate over .+%
EOE
else
- $rep_fetch "$rep/links.git#v1.0.2"
-
$* links/1.0.2 2>>~%EOE% != 0
checking out links/1.0.2
%.*
diff --git a/tests/pkg-clean.testscript b/tests/pkg-clean.testscript
index 7e04425..2922f51 100644
--- a/tests/pkg-clean.testscript
+++ b/tests/pkg-clean.testscript
@@ -121,7 +121,7 @@ $* 2>>EOE != 0
$pkg_configure libhello && $pkg_update libhello;
$* libhello 2>>~%EOE%;
- %rm .+%{8}
+ %(rm|rmdir) .+%{8}
cleaned libhello/1.0.0
EOE
diff --git a/tests/pkg-configure.testscript b/tests/pkg-configure.testscript
index 5a7d8aa..8430cec 100644
--- a/tests/pkg-configure.testscript
+++ b/tests/pkg-configure.testscript
@@ -35,6 +35,9 @@
# | | |-- driver.cxx
# | | `-- test.out
# | `-- version
+# |
+# |-- t8a (see pkg-build for details)
+# |
# `-- stable
# |-- libbar-1.0.0.tar.gz -> libfoo
# |-- libbar-1.1.0.tar.gz -> libfoo >= 1.1.0
@@ -63,9 +66,11 @@
#
cp -r $src/stable $out/stable
$rep_create $out/stable &$out/stable/packages.manifest
+
+ cp -r $src/t8a $out/t8a && $rep_create $out/t8a &$out/t8a/packages.manifest
end
-test.arguments += config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+test.arguments += config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
pkg_disfigure += -d cfg
pkg_fetch += -d cfg 2>!
@@ -271,6 +276,11 @@ if ($posix && "$uid" != '0')
: dependency-management
:
{
+ # Remove the config.cxx variable override to avoid the 'dropping no longer
+ # used variable' warning.
+ #
+ test.arguments = $regex.filter_out_match($test.arguments, 'config.cxx=.*')
+
+$clone_cfg && $rep_add $rep/stable && $rep_fetch --trust-yes
: still-has-deps
@@ -280,7 +290,7 @@ if ($posix && "$uid" != '0')
$pkg_fetch libbar/1.0.0 && $pkg_unpack libbar;
$* libbar 2>>EOE != 0;
- error: no configured package satisfies dependency on libfoo
+ error: unable to satisfy dependency on libfoo
EOE
$pkg_status libbar/1.0.0 1>'libbar unpacked 1.0.0';
@@ -288,7 +298,7 @@ if ($posix && "$uid" != '0')
$pkg_unpack libfoo;
$* libbar 2>>EOE != 0;
- error: no configured package satisfies dependency on libfoo
+ error: unable to satisfy dependency on libfoo
EOE
$* libfoo 2>'configured libfoo/1.0.0';
@@ -317,7 +327,7 @@ if ($posix && "$uid" != '0')
$pkg_unpack libbar;
$* libbar 2>>EOE != 0;
- error: no configured package satisfies dependency on libfoo >= 1.1.0
+ error: unable to satisfy dependency on libfoo >= 1.1.0
EOE
$pkg_disfigure libfoo 2>'disfigured libfoo/1.0.0';
@@ -344,7 +354,7 @@ if ($posix && "$uid" != '0')
$pkg_unpack libbar;
$* libbar 2>>EOE != 0;
- error: no configured package satisfies dependency on libfox | libfoo >= 1.2.0
+ error: unable to satisfy dependency on libfox | libfoo >= 1.2.0
EOE
$pkg_disfigure libfoo 2>'disfigured libfoo/1.1.0';
@@ -411,3 +421,68 @@ if ($posix && "$uid" != '0')
test -d cfg/libhello != 0
}
}
+
+: dependency-alternatives
+:
+{
+ # Remove the config.cxx variable override to avoid the 'dropping no longer
+ # used variable' warning.
+ #
+ test.arguments = $regex.filter_out_match($test.arguments, 'config.cxx=.*')
+
+ +$clone_root_cfg && $rep_add $rep/t8a && $rep_fetch --trust-yes
+
+ : multiple-dependencies
+ :
+ {
+ $clone_cfg;
+
+ $pkg_fetch foo/1.0.0 && $pkg_unpack foo;
+
+ $pkg_fetch libbar/1.0.0 && $pkg_unpack libbar;
+ $* libbar 2>!;
+
+ # Make sure that dependent configuration fails if some of the alternative
+ # dependencies is not configured.
+ #
+ $* foo 2>>EOE != 0;
+ error: unable to satisfy dependency on {libbar ^1.0.0 libbaz ^1.0.0}
+ EOE
+
+ $pkg_fetch libbaz/1.0.0 && $pkg_unpack libbaz;
+ $* libbaz 2>!;
+
+ $* foo 2>'configured foo/1.0.0';
+
+ $pkg_disfigure foo 2>!;
+ $pkg_purge foo 2>!;
+ $pkg_disfigure libbaz 2>!;
+ $pkg_purge libbaz 2>!;
+ $pkg_disfigure libbar 2>!;
+ $pkg_purge libbar 2>!
+ }
+
+ : reflect
+ :
+ {
+ $clone_cfg;
+
+ $pkg_fetch fox/1.0.0 && $pkg_unpack fox;
+ $pkg_fetch libbaz/1.0.0 && $pkg_unpack libbaz;
+
+ $* libbaz 2>!;
+
+ $* fox 2>'configured fox/1.0.0';
+
+ cat cfg/fox-1.0.0/build/config.build >>~%EOO%;
+ %.*
+ config.fox.backend = libbaz
+ %.*
+ EOO
+
+ $pkg_disfigure fox 2>!;
+ $pkg_purge fox 2>!;
+ $pkg_disfigure libbaz 2>!;
+ $pkg_purge libbaz 2>!
+ }
+}
diff --git a/tests/pkg-configure/t8a b/tests/pkg-configure/t8a
new file mode 120000
index 0000000..8fa2bda
--- /dev/null
+++ b/tests/pkg-configure/t8a
@@ -0,0 +1 @@
+../common/dependency-alternatives/t8a/ \ No newline at end of file
diff --git a/tests/pkg-drop.testscript b/tests/pkg-drop.testscript
index db9cf7c..a3c48ab 100644
--- a/tests/pkg-drop.testscript
+++ b/tests/pkg-drop.testscript
@@ -3,23 +3,14 @@
.include common.testscript config.testscript remote.testscript
-# Source repository:
+# Source repository (see pkg-build for details):
#
# pkg-drop
# |-- t4a
-# | |-- libfoo-1.1.0.tar.gz
-# | `-- repositories.manifest
-# |-- t4b -> t4a (prerequisite repository)
-# | |-- libbar-1.1.0.tar.gz -> libfoo == 1.1.0
-# | `-- repositories.manifest
-# |-- t4c -> t4b (prerequisite repository)
-# | |-- libbaz-1.1.0.tar.gz -> libfoo, libbar
-# | |-- libfoo-1.0.0.tar.gz
-# | `-- repositories.manifest
-# `-- t4d -> t4c (complement)
-# |-- libbiz-1.0.0.tar.gz -> libfox, libfoo, libbaz
-# |-- libfox-1.0.0.tar.gz
-# `-- repositories.manifest
+# |-- t4b
+# |-- t4c
+# |-- t4d
+# `-- t7a
# Prepare repositories used by tests if running in the local mode.
#
@@ -30,8 +21,11 @@
cp -r $src/t4b $out/t4b && $rep_create $out/t4b &$out/t4b/packages.manifest
cp -r $src/t4c $out/t4c && $rep_create $out/t4c &$out/t4c/packages.manifest
cp -r $src/t4d $out/t4d && $rep_create $out/t4d &$out/t4d/packages.manifest
+ cp -r $src/t7a $out/t7a && $rep_create $out/t7a &$out/t7a/packages.manifest
end
+cfg_create += 2>!
+cfg_link += 2>!
pkg_build += -d cfg --yes 2>!
pkg_status += -d cfg
rep_add += -d cfg 2>!
@@ -45,6 +39,22 @@ $* 2>>EOE != 0
info: run 'bpkg help pkg-drop' for more information
EOE
+: all-all-pattern
+:
+$clone_cfg;
+$* --all --all-pattern 'lib*' 2>>EOE != 0
+ error: both --all|-a and --all-pattern specified
+ info: run 'bpkg help pkg-drop' for more information
+ EOE
+
+: all-pattern-name
+:
+$clone_cfg;
+$* --all-pattern 'lib*' libbaz 2>>EOE != 0
+ error: both --all-pattern and package argument specified
+ info: run 'bpkg help pkg-drop' for more information
+ EOE
+
: unknown-package
:
$clone_cfg;
@@ -376,12 +386,13 @@ $* libfoo/1.0.0 2>>~%EOE% != 0
: keep-drop-options
:
-: Test --drop-dependent, --keep-dependent, --keep-unused, option.
+: Test --drop-dependent, --keep-dependent, --dependent-exit, --keep-unused
+: options.
:
{
+$clone_cfg && $rep_add $rep/t4b && $rep_fetch
- : keep-drop-dependent
+ : keep-exit-drop-dependent
:
{
$clone_cfg && $pkg_build libbar;
@@ -391,6 +402,8 @@ $* libfoo/1.0.0 2>>~%EOE% != 0
libbar (requires libfoo)
EOE
+ $* --dependent-exit 100 libfoo == 100;
+
$* --drop-dependent libfoo 2>>EOE
disfigured libbar
disfigured libfoo
@@ -444,3 +457,377 @@ $* libfoo/1.0.0 2>>~%EOE% != 0
$* libfoo 2>'purged libfoo'
}
+
+: linked-configs
+:
+{
+ # Get rid of -d option.
+ #
+ pkg_build = [cmdline] $0 pkg-build --yes --sys-no-query 2>!
+
+ : 3-configs
+ :
+ {
+ +$clone_root_cfg && $rep_add $rep/t4c && $rep_fetch
+
+ +$cfg_create -d cfg-bar &cfg-bar/***
+ +$cfg_create -d cfg-foo &cfg-foo/***
+
+ +$cfg_link -d cfg cfg-bar
+ +$cfg_link -d cfg-bar cfg-foo
+
+ : baz
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+ $pkg_build -d cfg-bar libbar@"$rep/t4b" ?libfoo +{ --config-id 2 } \
+ --trust-yes;
+
+ $pkg_build -d cfg libbaz;
+
+ $pkg_build -d cfg '?libbar' +{ --config-id 1 };
+
+ $* libbaz <<EOI 2>>/~%EOE%
+ y
+ y
+ EOI
+ following dependencies were automatically built but will no longer be used:
+ libbar [cfg-bar/]
+ libfoo [cfg-foo/]
+ %drop unused packages\? \[Y.n\] drop libbaz%
+ drop libbar [cfg-bar/]
+ drop libfoo [cfg-foo/]
+ %continue\? \[Y.n\] disfigured libbaz%
+ disfigured libbar [cfg-bar/]
+ disfigured libfoo [cfg-foo/]
+ purged libbaz
+ purged libbar [cfg-bar/]
+ purged libfoo [cfg-foo/]
+ EOE
+ }
+
+ : foo
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+ $pkg_build -d cfg-bar libbar@"$rep/t4b" ?libfoo +{ --config-id 2 } \
+ --trust-yes;
+
+ $pkg_build -d cfg libbaz;
+
+ # Make sure that dependents of a package being dropped can be found in
+ # implicitly linked configurations recursively. Note that configuring
+ # libbar as system, we make libbaz an only dependent of libfoo.
+ #
+ $pkg_build -d cfg '?sys:libbar' +{ --config-id 1 };
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ libbar [cfg-bar/] configured,system !* available [1.1.0]
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar -r libbar >>EOO;
+ libbar configured,system !* available 1.1.0
+ EOO
+
+ $pkg_status -d cfg-foo libfoo >'libfoo configured 1.1.0';
+
+ $* -d cfg-foo libfoo <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ libbaz [cfg/] (requires libfoo)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ sys:libbar [cfg-bar/]
+ %drop unused packages\? \[Y.n\] drop libbaz \[cfg/\]%
+ drop libfoo
+ drop libbar [cfg-bar/]
+ %continue\? \[Y.n\] disfigured libbaz \[cfg/\]%
+ disfigured libfoo
+ purged libbar [cfg-bar/]
+ purged libbaz [cfg/]
+ purged libfoo
+ EOE
+
+ $pkg_status libbaz >'libbaz available 1.1.0';
+ $pkg_status -d cfg-bar libbar >'libbar available 1.1.0';
+ $pkg_status -d cfg-foo libfoo >'libfoo unknown'
+ }
+
+ : bar
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg-bar ./;
+ cp -pr ../cfg-foo ./;
+
+ # Test that if we turn implicit links into explicit, then all dependents
+ # are still discovered.
+ #
+ $cfg_link -d cfg-bar cfg;
+ $cfg_link -d cfg-foo cfg-bar;
+
+ $pkg_build -d cfg-bar libbar@"$rep/t4b" ?libfoo +{ --config-id 2 } \
+ --trust-yes;
+
+ $pkg_build -d cfg libbaz;
+
+ $pkg_status -r libbaz >>/EOO;
+ !libbaz configured 1.1.0
+ !libbar [cfg-bar/] configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-bar -r libbar >>/EOO;
+ !libbar configured !1.1.0
+ libfoo [cfg-foo/] configured 1.1.0
+ EOO
+
+ $pkg_status -d cfg-foo libfoo >'libfoo configured 1.1.0';
+
+ $* -d cfg-bar libbar <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ libbaz [cfg/] (requires libbar)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libfoo [cfg-foo/]
+ %drop unused packages\? \[Y.n\] drop libbaz \[cfg/\]%
+ drop libbar
+ drop libfoo [cfg-foo/]
+ %continue\? \[Y.n\] disfigured libbaz \[cfg/\]%
+ disfigured libbar
+ disfigured libfoo [cfg-foo/]
+ purged libbaz [cfg/]
+ purged libbar
+ purged libfoo [cfg-foo/]
+ EOE
+
+ $pkg_status libbaz >'libbaz available 1.1.0';
+ $pkg_status -d cfg-bar libbar >'libbar available 1.1.0';
+ $pkg_status -d cfg-foo libfoo >'libfoo unknown'
+ }
+ }
+}
+
+: buildtime-dep
+:
+{
+ +$clone_cfg && $rep_add $rep/t7a && $rep_fetch
+ +$cfg_create -d cfg2 --type host &cfg2/***
+ +$cfg_link -d cfg cfg2
+
+ : drop-dependent
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $pkg_build libbar --yes &cfg2/.bpkg/build2/***;
+
+ $* libbar <<EOI 2>>/~%EOE%;
+ y
+ y
+ EOI
+ following dependencies were automatically built but will no longer be used:
+ foo [cfg2/]
+ libbaz [cfg2/]
+ libbuild2-bar [cfg2/.bpkg/build2/]
+ libbaz
+ %drop unused packages\? \[Y.n\] drop libbar%
+ drop foo [cfg2/]
+ drop libbaz [cfg2/]
+ drop libbuild2-bar [cfg2/.bpkg/build2/]
+ drop libbaz
+ %continue\? \[Y.n\] disfigured libbar%
+ disfigured foo [cfg2/]
+ disfigured libbaz [cfg2/]
+ disfigured libbuild2-bar [cfg2/.bpkg/build2/]
+ disfigured libbaz
+ purged libbar
+ purged foo [cfg2/]
+ purged libbaz [cfg2/]
+ purged libbuild2-bar [cfg2/.bpkg/build2/]
+ purged libbaz
+ EOE
+
+ $pkg_status -r libbar >'libbar available 1.0.0'
+ }
+
+ : drop-dependency
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $pkg_build libbar --yes &cfg2/.bpkg/build2/***;
+
+ $* -d cfg2 libbaz <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ foo (requires libbaz)
+ libbar [cfg/] (requires foo)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libbuild2-bar [cfg2/.bpkg/build2/]
+ libbaz [cfg/]
+ %drop unused packages\? \[Y.n\] drop libbar \[cfg/\]%
+ drop foo
+ drop libbaz
+ drop libbuild2-bar [cfg2/.bpkg/build2/]
+ drop libbaz [cfg/]
+ %continue\? \[Y.n\] disfigured libbar \[cfg/\]%
+ disfigured foo
+ disfigured libbaz
+ disfigured libbuild2-bar [cfg2/.bpkg/build2/]
+ disfigured libbaz [cfg/]
+ purged libbar [cfg/]
+ purged foo
+ purged libbaz
+ purged libbuild2-bar [cfg2/.bpkg/build2/]
+ purged libbaz [cfg/]
+ EOE
+
+ $pkg_status -r libbar >'libbar available 1.0.0'
+ }
+
+ : drop-private-dependency
+ :
+ {
+ $clone_root_cfg && $rep_add $rep/t7a && $rep_fetch;
+
+ $pkg_build libbar --yes &cfg/.bpkg/host/*** &cfg/.bpkg/build2/***;
+
+ $* -d cfg/.bpkg/build2/ libbuild2-bar <<EOI 2>>/~%EOE%
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ foo [cfg/.bpkg/host/] (requires libbuild2-bar)
+ libbar [cfg/] (requires foo [cfg/.bpkg/host/])
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libbaz [cfg/.bpkg/host/]
+ libbaz [cfg/]
+ %drop unused packages\? \[Y.n\] drop libbar \[cfg/\]%
+ drop foo [cfg/.bpkg/host/]
+ drop libbuild2-bar
+ drop libbaz [cfg/.bpkg/host/]
+ drop libbaz [cfg/]
+ %continue\? \[Y.n\] disfigured libbar \[cfg/\]%
+ disfigured foo [cfg/.bpkg/host/]
+ disfigured libbuild2-bar
+ disfigured libbaz [cfg/.bpkg/host/]
+ disfigured libbaz [cfg/]
+ purged libbar [cfg/]
+ purged foo [cfg/.bpkg/host/]
+ purged libbuild2-bar
+ purged libbaz [cfg/.bpkg/host/]
+ purged libbaz [cfg/]
+ EOE
+ }
+
+ : skip-deleted-dependency
+ :
+ {
+ $clone_cfg;
+ cp -pr ../cfg2 ./;
+
+ $pkg_build libbar --yes &cfg/lib*/*** &cfg/lib* &cfg2/.bpkg/build2/***;
+
+ mv cfg cfg.tmp;
+
+ $* -d cfg2 libbaz <<EOI 2>>/~%EOE%;
+ y
+ y
+ y
+ EOI
+ following dependent packages will have to be dropped as well:
+ foo (requires libbaz)
+ %drop dependent packages\? \[y.N\] following dependencies were automatically built but will no longer be used:%
+ libbuild2-bar [cfg2/.bpkg/build2/]
+ %drop unused packages\? \[Y.n\] drop foo%
+ drop libbaz
+ drop libbuild2-bar [cfg2/.bpkg/build2/]
+ %continue\? \[Y.n\] disfigured foo%
+ disfigured libbaz
+ disfigured libbuild2-bar [cfg2/.bpkg/build2/]
+ purged foo
+ purged libbaz
+ purged libbuild2-bar [cfg2/.bpkg/build2/]
+ EOE
+
+ # While at it, test that we properly handle the missing prerequisite
+ # situation.
+ #
+ mv cfg.tmp cfg;
+
+ $* libbar 2>>/EOE != 0;
+ error: unable to find prerequisite package foo in linked configuration cfg2/
+ EOE
+
+ $pkg_status -d cfg2 -r 2>>EOE
+ info: no held packages in the configuration
+ info: use --all|-a to see status of all packages
+ EOE
+ }
+}
+
+: all-options
+:
+{
+ +$clone_cfg && $rep_add $rep/t4b $rep/t4c && $rep_fetch
+
+ test.arguments += --yes
+
+ : all
+ :
+ {
+ $clone_cfg;
+
+ $pkg_build libbaz libbar;
+
+ $* --all 2>>EOO
+ disfigured libbaz
+ disfigured libbar
+ disfigured libfoo
+ purged libbaz
+ purged libbar
+ purged libfoo
+ EOO
+ }
+
+ : all-pattern
+ :
+ {
+ $clone_cfg;
+
+ $pkg_build libbaz libbar libfoo;
+
+ $* --all-pattern 'libb*' 2>>EOO;
+ disfigured libbaz
+ disfigured libbar
+ purged libbaz
+ purged libbar
+ EOO
+
+ $* libfoo 2>>EOO
+ disfigured libfoo
+ purged libfoo
+ EOO
+ }
+}
diff --git a/tests/pkg-drop/t7a b/tests/pkg-drop/t7a
new file mode 120000
index 0000000..d02b5d4
--- /dev/null
+++ b/tests/pkg-drop/t7a
@@ -0,0 +1 @@
+../common/linked/t7a \ No newline at end of file
diff --git a/tests/pkg-fetch.testscript b/tests/pkg-fetch.testscript
index 7d32523..5046c5d 100644
--- a/tests/pkg-fetch.testscript
+++ b/tests/pkg-fetch.testscript
@@ -160,7 +160,14 @@ $* libfoo/1.0.0 2>>/EOE != 0
$* -e $src/t1/libfoo-1.0.0.tar.gz 2>'using libfoo/1.0.0 (external)';
$pkg_status libfoo/1.0.0 1>'libfoo fetched 1.0.0';
- $pkg_purge libfoo 2>'purged libfoo/1.0.0'
+ $* libfoo/1.1.0 2>'fetched libfoo/1.1.0';
+ $pkg_unpack libfoo 2>'unpacked libfoo/1.1.0';
+ test -d cfg/libfoo-1.1.0;
+ $* libfoo/1.1.0 2>'fetched libfoo/1.1.0';
+ test -d cfg/libfoo-1.1.0 == 1;
+ $pkg_status libfoo/1.1.0 1>'libfoo fetched 1.1.0';
+
+ $pkg_purge libfoo 2>'purged libfoo/1.1.0'
}
: purge-existing
diff --git a/tests/pkg-status.testscript b/tests/pkg-status.testscript
index 86a85d4..885b0fb 100644
--- a/tests/pkg-status.testscript
+++ b/tests/pkg-status.testscript
@@ -40,7 +40,7 @@
cp -r $src/testing $out/testing
cp -r $src/unstable $out/unstable
- c = $rep_create 2>!
+ c = [cmdline] $rep_create 2>!
$c $out/extra &$out/extra/packages.manifest
$c $out/stable &$out/stable/packages.manifest
@@ -54,183 +54,288 @@
$git_extract $src/git/style-basic.tar &$out_git/state0/***
end
-pkg_fetch += 2>!
-pkg_purge += -d cfg 2>!
rep_add += -d cfg 2>!
rep_fetch += -d cfg --auth all --trust-yes 2>!
+pkg_fetch += 2>!
+pkg_build += -d cfg --yes 2>!
+pkg_purge += -d cfg 2>!
+pkg_drop += -d cfg --yes 2>!
-: basics
+: lines
:
{
+$clone_cfg
- : not-fetched
+ : basics
:
{
+$clone_cfg
- : libfoo-1.0.0
- :
- $clone_cfg;
- $* libfoo/1.0.0 >'libfoo unknown 1.0.0'
-
- : libfoo
- :
- $clone_cfg;
- $* libfoo >'libfoo unknown'
- }
-
- : rep-fetched
- :
- {
- +$clone_cfg && $rep_add $rep/stable && $rep_fetch
-
- +cp -r cfg ./fetched
- +$pkg_fetch libfoo/1.0.0 -d fetched &fetched/libfoo-1.0.0.tar.gz
-
- : libfoo-1.0.0
+ : not-fetched
:
- $clone_cfg;
- $* libfoo/1.0.0 >'libfoo available 1.0.0'
+ {
+ +$clone_cfg
- : libfoo-1.0.0+0
- :
- $clone_cfg;
- $* libfoo/1.0.0+0 >'libfoo available 1.0.0'
+ : libfoo-1.0.0
+ :
+ $clone_cfg;
+ $* libfoo/1.0.0 >'libfoo unknown 1.0.0'
- : libfoo
- :
- $clone_cfg;
- $* libfoo >'libfoo available 1.0.0'
+ : libfoo
+ :
+ $clone_cfg;
+ $* libfoo >'libfoo unknown'
+ }
- : pkg-fetched
+ : rep-fetched
:
{
- clone_cfg = cp -r ../../fetched cfg
+ +$clone_cfg && $rep_add $rep/stable && $rep_fetch
+
+ +cp -r cfg ./fetched
+ +$pkg_fetch libfoo/1.0.0 -d fetched &fetched/libfoo-1.0.0.tar.gz
: libfoo-1.0.0
:
$clone_cfg;
- $* libfoo/1.0.0 >'libfoo fetched 1.0.0'
+ $* libfoo/1.0.0 >'libfoo available 1.0.0'
+
+ : libfoo-1.0.0+0
+ :
+ $clone_cfg;
+ $* libfoo/1.0.0+0 >'libfoo available 1.0.0'
: libfoo
:
$clone_cfg;
- $* libfoo >'libfoo fetched 1.0.0'
+ $* libfoo >'libfoo available 1.0.0'
+
+ : pkg-fetched
+ :
+ {
+ clone_cfg = cp -r ../../fetched cfg
+
+ : libfoo-1.0.0
+ :
+ $clone_cfg;
+ $* libfoo/1.0.0 >'libfoo fetched 1.0.0'
+
+ : libfoo
+ :
+ $clone_cfg;
+ $* libfoo >'libfoo fetched 1.0.0'
+ }
}
}
-}
-
-: multiple-versions
-{
- # Prepare the nested tests to copy the root configuration. Note that they
- # must provide the destination directory name as an argument.
- #
- clone_cfg = cp -r $~/../cfg
- : extra
- :
+ : multiple-versions
{
- # Here we, first, prepare 2 configurations that derive from each other, and
- # then spawn 2 tests on them.
+ # Prepare the nested tests to copy the root configuration. Note that they
+ # must provide the destination directory name as an argument.
#
- +$clone_cfg extra && $rep_add -d extra $rep/extra && $rep_fetch -d extra
+ clone_cfg = cp -r $~/../cfg
- +cp -r extra extra-stable
- +$rep_add -d extra-stable $rep/stable && $rep_fetch -d extra-stable
-
- : libbar
+ : extra
:
- $* -d ../extra libbar >'libbar available 1.1.0+1 [1.0.0]'
-
- : libbar-stable
- :
- $* -d ../extra-stable libbar >'libbar available 1.1.0+1 1.0.0'
- }
+ {
+ # Here we, first, prepare 2 configurations that derive from each other,
+ # and then spawn 2 tests on them.
+ #
+ +$clone_cfg extra && $rep_add -d extra $rep/extra && $rep_fetch -d extra
- : testing
- :
- {
- +$clone_cfg ./ && $rep_add $rep/testing && $rep_fetch
+ +cp -r extra extra-stable
+ +$rep_add -d extra-stable $rep/stable && $rep_fetch -d extra-stable
- clone_cfg = cp -r ../cfg ./
+ : libbar
+ :
+ $* -d ../extra libbar >'libbar available 1.1.0+1 [1.0.0]'
- : no-version
- :
- {
- $clone_cfg;
- $* libbar >'libbar available [1.1.0+1] 1.1.0 1.0.0+1 1.0.0'
+ : libbar-stable
+ :
+ $* -d ../extra-stable libbar >'libbar available 1.1.0+1 1.0.0'
}
- : no-revision
+ : testing
:
{
- $clone_cfg;
- $* libbar/1.0.0 >'libbar available 1.0.0+1 1.0.0'
+ +$clone_cfg ./ && $rep_add $rep/testing && $rep_fetch
+
+ clone_cfg = cp -r ../cfg ./
+
+ : no-version
+ :
+ {
+ $clone_cfg;
+ $* libbar >'libbar available [1.1.0+1] 1.1.0 1.0.0+1 1.0.0'
+ }
+
+ : no-revision
+ :
+ {
+ $clone_cfg;
+ $* libbar/1.0.0 >'libbar available 1.0.0+1 1.0.0'
+ }
+
+ : zero-revision
+ :
+ {
+ $clone_cfg;
+ $* libbar/1.0.0+0 >'libbar available 1.0.0'
+ }
+
+ : recursive
+ :
+ {
+ $clone_cfg;
+
+ $pkg_build libbar;
+
+ $* libbar --recursive >>EOO;
+ !libbar configured 1.1.0 available [1.1.0+1]
+ libbaz configured 1.0.0
+ EOO
+
+ $pkg_drop libbar
+ }
}
- : zero-revision
+ : unstable
:
{
- $clone_cfg;
- $* libbar/1.0.0+0 >'libbar available 1.0.0'
+ # Here we, first, prepare 3 configurations that derive from each other,
+ # and then spawn 3 tests on them.
+ #
+ +$clone_cfg ./ && $rep_add $rep/unstable && $rep_fetch
+
+ +cp -r cfg fetched1
+ +$pkg_fetch libbar/1.0.0+1 -d fetched1 &fetched1/libbar-1.0.0+1.tar.gz
+
+ +cp -r fetched1 fetched2
+ +$pkg_purge -d fetched2 libbar &!fetched2/libbar-1.0.0+1.tar.gz
+ +$pkg_fetch libbar/2.0.0 -d fetched2 &fetched2/libbar-2.0.0.tar.gz
+
+ : not-fetched
+ :
+ $* -d ../cfg libbar >'libbar available 2.0.0 [1.1.0+1] 1.1.0 1.0.0+1 1.0.0'
+
+ : fetched-1
+ :
+ $* -d ../fetched1 libbar >'libbar fetched 1.0.0+1 available 2.0.0 [1.1.0+1] 1.1.0'
+
+ : fetched-2
+ :
+ $* -d ../fetched2 libbar >'libbar fetched 2.0.0'
}
}
- : unstable
+ : git-rep
:
+ if! $git_supported
{
- # Here we, first, prepare 3 configurations that derive from each other, and
- # then spawn 3 tests on them.
+ # Skip git repository tests.
#
- +$clone_cfg ./ && $rep_add $rep/unstable && $rep_fetch
-
- +cp -r cfg fetched1
- +$pkg_fetch libbar/1.0.0+1 -d fetched1 &fetched1/libbar-1.0.0+1.tar.gz
-
- +cp -r fetched1 fetched2
- +$pkg_purge -d fetched2 libbar &!fetched2/libbar-1.0.0+1.tar.gz
- +$pkg_fetch libbar/2.0.0 -d fetched2 &fetched2/libbar-2.0.0.tar.gz
+ }
+ else
+ {
+ rep = "$rep_git/state0"
+ test.cleanups += &cfg/.bpkg/repos/*/***
- : not-fetched
+ : complement-cycle
:
- $* -d ../cfg libbar >'libbar available 2.0.0 [1.1.0+1] 1.1.0 1.0.0+1 1.0.0'
-
- : fetched-1
+ : Make sure that we properly handle the root<->style repository dependency
+ : cycle while searching for the style-basic package, that is an available
+ : package but not from the user-added repository (or its complement), and
+ : so is not detected as buildable by the status command. Note that the root
+ : repository is the default complement for git repositories (see
+ : rep_fetch() implementation for the reasoning).
:
- $* -d ../fetched1 libbar >'libbar fetched 1.0.0+1 available 2.0.0 [1.1.0+1] 1.1.0'
+ $clone_root_cfg;
+ $rep_add "$rep/libbar.git#master" && $rep_add "$rep/style.git#master";
- : fetched-2
- :
- $* -d ../fetched2 libbar >'libbar fetched 2.0.0'
+ $rep_fetch 2>!;
+
+ $* style-basic >~'%style-basic available \[1\.1\.0-a\.0\..+\]%'
}
}
-: git-rep
+: json
:
-if! $git_supported
{
- # Skip git repository tests.
- #
-}
-else
-{
- rep = "$rep_git/state0"
- test.cleanups += &cfg/.bpkg/repos/*/***
+ test.arguments += --stdout-format json
+
+ +$clone_cfg
- : complement-cycle
+ : not-fetched
:
- : Make sure that we properly handle the root<->style repository dependency
- : cycle while searching for the style-basic package, that is an available
- : package but not from the user-added repository (or its complement), and so
- : is not detected as buildable by the status command. Note that the root
- : repository is the default complement for git repositories (see rep_fetch()
- : implementation for the reasoning).
+ {
+ +$clone_cfg
+
+ : libfoo-1.0.0
+ :
+ $clone_cfg;
+ $* libfoo/1.0.0 >>EOO
+ [
+ {
+ "name": "libfoo",
+ "status": "unknown",
+ "version": "1.0.0"
+ }
+ ]
+ EOO
+
+ : libfoo
+ :
+ $clone_cfg;
+ $* libfoo >>EOO
+ [
+ {
+ "name": "libfoo",
+ "status": "unknown"
+ }
+ ]
+ EOO
+ }
+
+ : fetched
:
- $clone_root_cfg;
- $rep_add "$rep/libbar.git#master" && $rep_add "$rep/style.git#master";
+ {
+ +$clone_cfg
- $rep_fetch 2>!;
+ +$rep_add $rep/testing && $rep_fetch
- $* style-basic >~'%style-basic available \[1\.1\.0-a\.0\..+\]%'
+ : recursive
+ :
+ {
+ $clone_cfg;
+
+ $pkg_build libbar;
+
+ $* libbar --recursive --constraint >>EOO;
+ [
+ {
+ "name": "libbar",
+ "status": "configured",
+ "version": "1.1.0",
+ "hold_package": true,
+ "available_versions": [
+ {
+ "version": "1.1.0+1",
+ "dependency": true
+ }
+ ],
+ "dependencies": [
+ {
+ "name": "libbaz",
+ "constraint": "^1.0.0",
+ "status": "configured",
+ "version": "1.0.0"
+ }
+ ]
+ }
+ ]
+ EOO
+
+ $pkg_drop libbar
+ }
+ }
}
diff --git a/tests/pkg-status/extra/libbar-1.1.0+1.tar.gz b/tests/pkg-status/extra/libbar-1.1.0+1.tar.gz
index 890e9e2..08f2867 100644
--- a/tests/pkg-status/extra/libbar-1.1.0+1.tar.gz
+++ b/tests/pkg-status/extra/libbar-1.1.0+1.tar.gz
Binary files differ
diff --git a/tests/pkg-status/stable/libbar-1.0.0.tar.gz b/tests/pkg-status/stable/libbar-1.0.0.tar.gz
index 97e6e32..e49f301 100644
--- a/tests/pkg-status/stable/libbar-1.0.0.tar.gz
+++ b/tests/pkg-status/stable/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/pkg-status/stable/libfoo-1.0.0.tar.gz b/tests/pkg-status/stable/libfoo-1.0.0.tar.gz
index 5e7fa17..da5cc08 100644
--- a/tests/pkg-status/stable/libfoo-1.0.0.tar.gz
+++ b/tests/pkg-status/stable/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/pkg-status/testing/libbar-1.0.0+1.tar.gz b/tests/pkg-status/testing/libbar-1.0.0+1.tar.gz
index 5794085..d38cbbd 100644
--- a/tests/pkg-status/testing/libbar-1.0.0+1.tar.gz
+++ b/tests/pkg-status/testing/libbar-1.0.0+1.tar.gz
Binary files differ
diff --git a/tests/pkg-status/testing/libbar-1.1.0.tar.gz b/tests/pkg-status/testing/libbar-1.1.0.tar.gz
index b01ac44..a5e060d 100644
--- a/tests/pkg-status/testing/libbar-1.1.0.tar.gz
+++ b/tests/pkg-status/testing/libbar-1.1.0.tar.gz
Binary files differ
diff --git a/tests/pkg-status/testing/libbaz-1.0.0.tar.gz b/tests/pkg-status/testing/libbaz-1.0.0.tar.gz
new file mode 100644
index 0000000..8d4c2f3
--- /dev/null
+++ b/tests/pkg-status/testing/libbaz-1.0.0.tar.gz
Binary files differ
diff --git a/tests/pkg-status/unstable/libbar-2.0.0.tar.gz b/tests/pkg-status/unstable/libbar-2.0.0.tar.gz
index 6cc5890..98616f8 100644
--- a/tests/pkg-status/unstable/libbar-2.0.0.tar.gz
+++ b/tests/pkg-status/unstable/libbar-2.0.0.tar.gz
Binary files differ
diff --git a/tests/pkg-system.testscript b/tests/pkg-system.testscript
index 26d6893..11ed1cc 100644
--- a/tests/pkg-system.testscript
+++ b/tests/pkg-system.testscript
@@ -52,10 +52,9 @@ rep_remove += -d cfg 2>!
{
$clone_cfg;
- $pkg_build 'sys:libbar' 2>>/EOE != 0;
- error: unknown package libbar
- info: configuration cfg/ has no repositories
- info: use 'bpkg rep-add' to add a repository
+ $pkg_build 'sys:libbar' 2>>EOE != 0;
+ error: unknown package sys:libbar
+ info: consider specifying sys:libbar/*
EOE
$pkg_build 'sys:libbar/1' 2>>EOE;
@@ -90,28 +89,28 @@ rep_remove += -d cfg 2>!
{
$clone_cfg;
- $pkg_build 'sys:libbar' '?sys:libbar' 2>>EOE != 0;
+ $pkg_build 'sys:libbar' 'sys:libbar/1.0.0' 2>>EOE != 0;
error: duplicate package libbar
info: first mentioned as sys:libbar
- info: second mentioned as ?sys:libbar
+ info: second mentioned as sys:libbar/1.0.0
EOE
- $pkg_build '?sys:libbar' 'sys:libbar' 2>>EOE != 0;
+ $pkg_build '?sys:libbar' '?sys:libbar/1.0.0' 2>>EOE != 0;
error: duplicate package libbar
info: first mentioned as ?sys:libbar
- info: second mentioned as sys:libbar
+ info: second mentioned as ?sys:libbar/1.0.0
EOE
- $pkg_build '?sys:libbar' libbar 2>>EOE != 0;
+ $pkg_build 'sys:libbar' libbar 2>>EOE != 0;
error: duplicate package libbar
- info: first mentioned as ?sys:libbar
+ info: first mentioned as sys:libbar
info: second mentioned as libbar
EOE
- $pkg_build libbar '?sys:libbar' 2>>EOE != 0;
+ $pkg_build ?libbar '?sys:libbar' +{ --config-id 0 } 2>>EOE != 0;
error: duplicate package libbar
- info: first mentioned as libbar
- info: second mentioned as ?sys:libbar
+ info: first mentioned as ?libbar
+ info: second mentioned as ?sys:libbar +{ --config-id 0 }
EOE
$pkg_build 'sys:libbar' libbar 2>>EOE != 0;
@@ -711,12 +710,12 @@ rep_remove += -d cfg 2>!
#
$pkg_build foo 'sys:libbar/1' 2>>EOE != 0;
error: unable to satisfy constraints on package libbar
- info: foo depends on (libbar >= 2)
info: command line depends on (libbar == 1)
- info: available sys:libbar/2
+ info: foo/2 depends on (libbar >= 2)
info: available sys:libbar/1
+ info: available sys:libbar/2
+ info: while satisfying foo/2
info: explicitly specify libbar version to manually satisfy both constraints
- info: while satisfying foo/2
EOE
$pkg_drop libbar
@@ -850,10 +849,6 @@ rep_remove += -d cfg 2>!
info: while satisfying foo/2
EOE
- $pkg_build 'sys:libbar' 2>>EOE != 0;
- error: unknown package libbar
- EOE
-
$pkg_build foo 'sys:libbar/1' 2>>EOE != 0;
error: dependency libbar >= 2 of package foo is not available in source
info: sys:libbar/1 does not satisfy the constrains
@@ -872,11 +867,26 @@ rep_remove += -d cfg 2>!
$pkg_status libbar >'libbar unknown'
}
+ : find-all
+ :
+ : Test that sys:libbar can be built to hold even if its stub package is not
+ : available from the configured repository but only from its prerequisite
+ : repository.
+ :
+ {
+ $clone_cfg;
+
+ $pkg_build 'sys:libbar' 2>>EOE;
+ configured sys:libbar/*
+ EOE
+
+ $pkg_status libbar >'!libbar configured,system !*'
+ }
+
: syslibbar1-foo-syslibbar-drop-foo
:
- : The overall plan is to build foo ?sys:libbar/2, fail to build sys:libbar
- : and foo 'sys:libbar', but succeed to build foo ?sys:libbar/3 and foo
- : ?sys:libbar.
+ : The overall plan is to build foo ?sys:libbar/2, then foo ?sys:libbar/3,
+ : and then foo ?sys:libbar.
:
{
$clone_cfg;
@@ -896,14 +906,6 @@ rep_remove += -d cfg 2>!
$pkg_status foo >'!foo configured 2';
$pkg_status libbar >'libbar configured,system !2';
- # Fail as libbar while being selected is still unknown (not present in t3
- # repo).
- #
- $pkg_build 'sys:libbar' 2>'error: unknown package libbar' != 0;
- $pkg_build foo 'sys:libbar' 2>'error: unknown package libbar' != 0;
- $pkg_status foo 1>'!foo configured 2';
- $pkg_status libbar 1>'libbar configured,system !2';
-
# Build foo and ?sys:libbar/3.
#
$pkg_build foo '?sys:libbar/3' 2>>~%EOE%;
diff --git a/tests/pkg-system/libbar-0+1.tar.gz b/tests/pkg-system/libbar-0+1.tar.gz
index 9f90936..7461443 100644
--- a/tests/pkg-system/libbar-0+1.tar.gz
+++ b/tests/pkg-system/libbar-0+1.tar.gz
Binary files differ
diff --git a/tests/pkg-update.testscript b/tests/pkg-update.testscript
index f0a8f64..89833d4 100644
--- a/tests/pkg-update.testscript
+++ b/tests/pkg-update.testscript
@@ -47,7 +47,7 @@
&$out/hello/signature.manifest
end
-config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+config_cxx = [cmdline] config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
pkg_configure += -d cfg $config_cxx 2>!
pkg_disfigure += -d cfg
diff --git a/tests/pkg-verify.testscript b/tests/pkg-verify.testscript
index 81821cb..8057aab 100644
--- a/tests/pkg-verify.testscript
+++ b/tests/pkg-verify.testscript
@@ -8,6 +8,7 @@
# pkg-verify
# |-- foo-1.tar.gz
# |-- foo-2.tar.gz (manifest with unknown name)
+# |-- libbaz-1.0.0.tar.gz (manifest with unsatisfiable toolchain constraint)
# `-- not-a-package.tar.gz
: valid-package
@@ -77,20 +78,25 @@ $* --deep --ignore-unknown --manifest $src/foo-2.tar.gz >>EOO
version: 2
summary: The "Foo" utility
license: MIT
- description: \
+ description:\
This package contains the foo utility.
\
description-type: text/plain
- changes: \
+ changes:\
Version 2
* First public release.
\
+ changes-type: text/plain
url: http://www.example.org/foo
email: foo-users@example.org
depends: bar == 2
+ bootstrap-build:\
+ project = foo
+
+ \
EOO
: incomplete-dependency
@@ -107,3 +113,54 @@ $* --ignore-unknown --manifest $src/foo-2.tar.gz >>EOO
email: foo-users@example.org
depends: bar == $
EOO
+
+: no-bootstrap-build
+:
+{
+ : no-deep
+ :
+ $* --manifest $src/foo-3.tar.gz >>EOO
+ : 1
+ name: foo
+ version: 3
+ summary: The "Foo" utility
+ license: MIT
+ description-file: README
+ changes-file: NEWS
+ url: http://www.example.org/foo
+ email: foo-users@example.org
+ depends: bar == $
+ EOO
+
+ : deep
+ :
+ $* --deep --manifest $src/foo-3.tar.gz 2>>/~%EOE% != 0
+ %error: unable to find bootstrap.build file in package archive .+/foo-3.tar.gz%
+ EOE
+}
+
+: compatibility
+:
+{
+ : fail
+ :
+ $* --manifest $src/libbaz-1.0.0.tar.gz 2>>/~%EOE% != 0
+ %error: unable to satisfy constraint \(build2 >= 65536.0.0\) for package .+/libbaz-1.0.0.tar.gz%
+ % info: available build2 version is .+%
+ EOE
+
+ : success
+ :
+ $* --manifest --ignore-unknown $src/libbaz-1.0.0.tar.gz >>EOO
+ : 1
+ name: libbaz
+ version: 1.0.0
+ summary: libbaz
+ license: MIT
+ description: libbaz library
+ url: http://example.org
+ email: pkg@example.org
+ depends: * build2 >= 65536.0.0
+ depends: * bpkg >= 65536.0.0
+ EOO
+}
diff --git a/tests/pkg-verify/foo-2.tar.gz b/tests/pkg-verify/foo-2.tar.gz
index 5f5739d..f2d3db8 100644
--- a/tests/pkg-verify/foo-2.tar.gz
+++ b/tests/pkg-verify/foo-2.tar.gz
Binary files differ
diff --git a/tests/pkg-verify/foo-3.tar.gz b/tests/pkg-verify/foo-3.tar.gz
new file mode 100644
index 0000000..67edccd
--- /dev/null
+++ b/tests/pkg-verify/foo-3.tar.gz
Binary files differ
diff --git a/tests/pkg-verify/libbaz-1.0.0.tar.gz b/tests/pkg-verify/libbaz-1.0.0.tar.gz
new file mode 120000
index 0000000..a95a5dc
--- /dev/null
+++ b/tests/pkg-verify/libbaz-1.0.0.tar.gz
@@ -0,0 +1 @@
+../common/compatibility/t15/libbaz-1.0.0.tar.gz \ No newline at end of file
diff --git a/tests/remote-git.testscript b/tests/remote-git.testscript
index 308c067..377277d 100644
--- a/tests/remote-git.testscript
+++ b/tests/remote-git.testscript
@@ -66,12 +66,17 @@ else
end
# Command for extracting the git repository from a tarball into the output
-# directory (see above).
+# directory (see above). Note: should only be used for .tar, not .tar.gz.
#
# Note that we can expect that the tar program is present on the platform. We
# will use the same options as we do for unpacking of package archives (see
# pkg-unpack.cxx).
#
-git_extract = ($cxx.target.class != 'windows' \
+# Note that on Windows we still use tar rather than bsdtar here, since the
+# later fails for dangling symlinks and we have such symlinks in the
+# repository archives which are used for testing.
+#
+git_extract = [cmdline] \
+ ($cxx.target.class != 'windows' \
? tar -C $out_git -xf \
: tar -C $regex.replace($out_git, '\\', '/') --force-local -xf)
diff --git a/tests/rep-auth.testscript b/tests/rep-auth.testscript
index 0ba46fa..5d51857 100644
--- a/tests/rep-auth.testscript
+++ b/tests/rep-auth.testscript
@@ -24,7 +24,7 @@
# Prepare repositories used by tests if running in the local mode.
#
+if! $remote
- rc = $rep_create 2>!
+ rc = [cmdline] $rep_create 2>!
# Create the 'unsigned1' repository.
#
@@ -50,7 +50,7 @@
#
cp -r $src/unsigned $out/self-match
- echo 'certificate: \' >+$out/self-match/repositories.manifest
+ echo 'certificate:\' >+$out/self-match/repositories.manifest
cat <<<$src_base/auth/self-cert.pem >+$out/self-match/repositories.manifest
echo '\' >+$out/self-match/repositories.manifest
@@ -62,7 +62,7 @@
#
cp -r $src/unsigned $out/self-any-match
- echo 'certificate: \' >+$out/self-any-match/repositories.manifest
+ echo 'certificate:\' >+$out/self-any-match/repositories.manifest
cat <<<$src_base/auth/self-any-cert.pem >+$out/self-any-match/repositories.manifest
echo '\' >+$out/self-any-match/repositories.manifest
@@ -75,7 +75,7 @@
#
cp -r $src/unsigned $out/subdomain-match
- echo 'certificate: \' >+$out/subdomain-match/repositories.manifest
+ echo 'certificate:\' >+$out/subdomain-match/repositories.manifest
cat <<<$src_base/auth/subdomain-cert.pem >+$out/subdomain-match/repositories.manifest
echo '\' >+$out/subdomain-match/repositories.manifest
@@ -87,7 +87,7 @@
#
cp -r $src/unsigned $out/name-mismatch
- echo 'certificate: \' >+$out/name-mismatch/repositories.manifest
+ echo 'certificate:\' >+$out/name-mismatch/repositories.manifest
cat <<<$src_base/auth/mismatch-cert.pem >+$out/name-mismatch/repositories.manifest
echo '\' >+$out/name-mismatch/repositories.manifest
@@ -128,8 +128,8 @@ rep_fetch += -d cfg
# Check if rep-fetch command was successfull or not.
#
-fetched = $pkg_status foo >'foo available 1'
-not_fetched = $pkg_status foo >'foo unknown'
+fetched = [cmdline] $pkg_status foo >'foo available 1'
+not_fetched = [cmdline] $pkg_status foo >'foo unknown'
sc = " " # Space character to append to here-document line when required.
@@ -605,7 +605,7 @@ sc = " " # Space character to append to here-document line when required.
{
cp -r $src/unsigned rep;
- echo 'certificate: \' >+rep/repositories.manifest;
+ echo 'certificate:\' >+rep/repositories.manifest;
cat <<<$src_base/auth/noemail-cert.pem >+rep/repositories.manifest;
echo '\' >+rep/repositories.manifest;
@@ -620,7 +620,7 @@ sc = " " # Space character to append to here-document line when required.
{
cp -r $src/unsigned rep;
- echo 'certificate: \' >+rep/repositories.manifest;
+ echo 'certificate:\' >+rep/repositories.manifest;
cat <<<$src_base/auth/expired-cert.pem >+rep/repositories.manifest;
echo '\' >+rep/repositories.manifest;
diff --git a/tests/rep-auth/expired/packages.manifest b/tests/rep-auth/expired/packages.manifest
index 4fa075a..aac8bfd 100644
--- a/tests/rep-auth/expired/packages.manifest
+++ b/tests/rep-auth/expired/packages.manifest
@@ -1,5 +1,5 @@
: 1
-sha256sum: dbeea68d374f6ca66b5f65652e26a0d8324d3fe118341ac470c07214ceb34b60
+sha256sum: 28183f366660bb265ef488c2b5b8696cd007392f7c204a9ee6ecbcba6b44b375
:
name: foo
version: 1
diff --git a/tests/rep-auth/expired/repositories.manifest b/tests/rep-auth/expired/repositories.manifest
index 7e68ee8..3658269 100644
--- a/tests/rep-auth/expired/repositories.manifest
+++ b/tests/rep-auth/expired/repositories.manifest
@@ -1,5 +1,5 @@
: 1
-certificate: \
+certificate:\
-----BEGIN CERTIFICATE-----
MIIFLzCCAxegAwIBAgIJAOx1KvcHxv3GMA0GCSqGSIb3DQEBCwUAMDMxFzAVBgNV
BAoMDkNvZGUgU3ludGhlc2lzMRgwFgYDVQQDDA9uYW1lOmJ1aWxkMi5vcmcwHhcN
diff --git a/tests/rep-auth/expired/signature.manifest b/tests/rep-auth/expired/signature.manifest
index 454653f..99095f4 100644
--- a/tests/rep-auth/expired/signature.manifest
+++ b/tests/rep-auth/expired/signature.manifest
@@ -1,13 +1,13 @@
: 1
-sha256sum: ad6c46b7c2b994957215025de0f0ce9be192f6973a8b36f5d3f3d1670172af1e
-signature: \
-PQrvkr6ONdIIrEaORzsgpXNpNkMVmdjW8Ahgpi+sdwyjsxkgMj/0gHjyF0amPi6ae+zUu4igDeKM
-JnWuShDMbxLy+zxy6IpjpchwtPymM3euqQnLWebRJ2b+9RlBVGIRCl25VnFn+mmHW76+yRnOT6nI
-fYNCM6eiLihWKLRUCnsj18PZSV7EK68Q3iPAUws4F4A63eKqJe/qSWkRjJrVpi+UFDjYkJi9542D
-MELmYFXV592UVoQ3PPK+ZU/Ja+cosHND9mHe2xvLKHFnBKUi5LKvZ+aZdiZXisnisX9PgvGzk+5j
-qzzJUse21NLA2J4D+jiH8r2fj4qjh1dLHgocQj3GQeN8r18XvaLtP9+tcLYfphWnyOfk3q12xtn8
-h9yNoVPHVsMiYrB7JXX2TM4qPqepi3f8iQmEfZxWqlI5/cpeQz3DTDZ4wagNbBoYzavUqTjriXCU
-eLWVZz89JkYhWYTBPx26XZp56TsN16ZrN6oMtHgVy6YmmbjIea7sZXhNc8a/7FCdxap8PZfJS1cW
-LieoZqQF5C0O8+sJnYl88Pp76mtQIgXEsfc5TnvgMosQfXbHzMbD7naD0woWqu5GVKWx6+AdK2uH
-KbuSXYrJYfmuS2Ptp/T7ZIkRE2YZPe4+lsMkIiqY1fg2qIdWgpQiH0ZE3Ki3GH4FUWEkurWgxYk=
+sha256sum: a7642c76aba10b702453882504d1d39033ad5e19d1fcd97ad40b37005e9e2927
+signature:\
+ClTGigHFr2VpVZIz5KjxNHKJyEszJ2eP0CLyOS63rim02Vrpl9kIeIIceRdTfs3R6u62gu89MsFi
+0VbO37h8DDHY+aZKtlAKg4t5pZjdQ3T56AP+GcB0m0t5oXxEFiGF1JdWOz+ukbvWaA8BmSPWzTsl
+m7aU+MdyiQKg7bdyXzKqc/vSY28CuTvhaTpDz43qJOVFC2ezk7jbmPuhaQIVqmLHEiKMQGCynVUV
+DI9OZZ98QpplY9d1qNMdc0jWVAhtAuTBW+E+zD4fT4j6mL/1zEz5WWoOAbceyD0Gg5pp0P8ZBg1+
+gzzOLL3ikdMqq+fQY2AKcYzxyTFNCJsnX6KeaYESZZUsI4YOURf+VcjJb24AX3KBevidMcbcGyoo
+zBNmqWOLIheK983zDovccj3bRnmByprV95VxWEl7omifZ56Lsfow6rQM2KEVmdSk2ScJUJew4zR6
+1OI7m3GEqrwqlBzKn0uviasMSZMSj4pbBr5bv5Y6bvryB3RUnJft0ui1p3vP1s9zxmVAdDhuRmQ0
+ntcQey/vEPyU/q3vQ3riJop5cYXu9IJpg7EGcRJTNDKq1OMtFbnEY5ZljmtD9e/neplPb6p/hacK
+PIEzHP8aWzJjDxoXibt8CSfwCfn5mCiBrtfoJ3u7V8Ghish9/SVxKwoJ3dfso4gBSgfdHk/LsPI=
\
diff --git a/tests/rep-create.testscript b/tests/rep-create.testscript
index 4f40f7a..bca7305 100644
--- a/tests/rep-create.testscript
+++ b/tests/rep-create.testscript
@@ -19,7 +19,7 @@
# Make sure the cloned repository has a valid location, so we can use
# rep-info command to validate the repository info.
#
- clone_rep = mkdir 1/ && cp -r $src/stable 1/
+ clone_rep = [cmdline] mkdir 1/ && cp -r $src/stable 1/
: without-key
:
@@ -41,8 +41,12 @@
url: http://www.example.org/bar
email: bar-users@example.org
depends: foo == 1
+ bootstrap-build:\
+ project = bar
+
+ \
location: bar-1.tar.gz
- sha256sum: 514a99f5fadb94f946f8abff59caa9c3cc442cd4f30d4383a1cf6d26ca058036
+ sha256sum: 56528e387d1b8e18e3ee7e8510916afdb65f881acfd49d959fae6f434c3bab3c
:
name: foo
version: 1
@@ -50,8 +54,12 @@
license: MIT
url: http://www.example.org/foo
email: foo-users@example.org
+ bootstrap-build:\
+ project = foo
+
+ \
location: foo-1.tar.gz
- sha256sum: fee330a362a4f87ff42a954aa305b6446d541b7b60000ebcd2fbf68f2b1ae58e
+ sha256sum: 1d88df336611286cdbd84f5c1d87bedc774bc833e200de675e34d9b219c66cfc
EOO
}
@@ -78,8 +86,12 @@
url: http://www.example.org/bar
email: bar-users@example.org
depends: foo == 1
+ bootstrap-build:\
+ project = bar
+
+ \
location: bar-1.tar.gz
- sha256sum: 514a99f5fadb94f946f8abff59caa9c3cc442cd4f30d4383a1cf6d26ca058036
+ sha256sum: 56528e387d1b8e18e3ee7e8510916afdb65f881acfd49d959fae6f434c3bab3c
:
name: foo
version: 1
@@ -87,8 +99,12 @@
license: MIT
url: http://www.example.org/foo
email: foo-users@example.org
+ bootstrap-build:\
+ project = foo
+
+ \
location: foo-1.tar.gz
- sha256sum: fee330a362a4f87ff42a954aa305b6446d541b7b60000ebcd2fbf68f2b1ae58e
+ sha256sum: 1d88df336611286cdbd84f5c1d87bedc774bc833e200de675e34d9b219c66cfc
EOO
}
}
@@ -104,7 +120,7 @@
# Make sure the cloned repository has a valid location, so we can use
# rep-info command to validate the repository info.
#
- clone_rep = mkdir 1/ && cp -r ../stable 1/
+ clone_rep = [cmdline] mkdir 1/ && cp -r ../stable 1/
: with-key
:
@@ -132,8 +148,12 @@
url: http://www.example.org/bar
email: bar-users@example.org
depends: foo == 1
+ bootstrap-build:\\
+ project = bar
+
+ \\
location: bar-1.tar.gz
- sha256sum: 514a99f5fadb94f946f8abff59caa9c3cc442cd4f30d4383a1cf6d26ca058036
+ sha256sum: 56528e387d1b8e18e3ee7e8510916afdb65f881acfd49d959fae6f434c3bab3c
:
name: foo
version: 1
@@ -141,8 +161,12 @@
license: MIT
url: http://www.example.org/foo
email: foo-users@example.org
+ bootstrap-build:\\
+ project = foo
+
+ \\
location: foo-1.tar.gz
- sha256sum: fee330a362a4f87ff42a954aa305b6446d541b7b60000ebcd2fbf68f2b1ae58e
+ sha256sum: 1d88df336611286cdbd84f5c1d87bedc774bc833e200de675e34d9b219c66cfc
EOO
}
@@ -180,7 +204,7 @@
: Test that package manifest that contains an unknown name is properly handled.
:
{
- clone_rep = cp -r $src/testing ./
+ clone_rep = [cmdline] cp -r $src/testing ./
: fail
:
@@ -204,7 +228,7 @@
: Here we break the 'stable' repository prior to running a test.
:
{
- clone_rep = cp -r $src/stable ./
+ clone_rep = [cmdline] cp -r $src/stable ./
: no-repositories-manifest
:
diff --git a/tests/rep-create/stable/bar-1.tar.gz b/tests/rep-create/stable/bar-1.tar.gz
index 66c79a2..d4e6103 100644
--- a/tests/rep-create/stable/bar-1.tar.gz
+++ b/tests/rep-create/stable/bar-1.tar.gz
Binary files differ
diff --git a/tests/rep-create/testing/foo-2.tar.gz b/tests/rep-create/testing/foo-2.tar.gz
index cb39194..77a7678 100644
--- a/tests/rep-create/testing/foo-2.tar.gz
+++ b/tests/rep-create/testing/foo-2.tar.gz
Binary files differ
diff --git a/tests/rep-fetch-git-refname.testscript b/tests/rep-fetch-git-refname.testscript
index b5de881..0275d56 100644
--- a/tests/rep-fetch-git-refname.testscript
+++ b/tests/rep-fetch-git-refname.testscript
@@ -39,7 +39,7 @@
: changed
:
{
- g = git -C
+ g = [cmdline] git -C
u = "$rep_git/state1"
d = cfg/libfoo-1.0.0
@@ -93,6 +93,8 @@
%fetching submodule 'libbar/extras' from .+style-basic\.git+%
$info4
%submodule path 'libbar/extras': checked out .+%
+ verifying symlinks...
+ %fixing up symlinks...%?
distributing libfoo/1.0.0
checked out libfoo/1.0.0
EOE
@@ -149,6 +151,8 @@
%fetching submodule 'libbaz' from .+libbaz\.git%
$info2
%submodule path 'libbaz': checked out .+%
+ verifying symlinks...
+ %fixing up symlinks...%?
distributing libfoo/1.0.0
checked out libfoo/1.0.0
EOE
diff --git a/tests/rep-fetch.testscript b/tests/rep-fetch.testscript
index b9b9e05..b713c0c 100644
--- a/tests/rep-fetch.testscript
+++ b/tests/rep-fetch.testscript
@@ -631,11 +631,15 @@ if! $remote
: unchanged-external
:
+ : Test that iteration is still incremented when a non-external package
+ : from a pkg repository is switched to the same unedited external
+ : package.
+ :
{
$clone_cfg && $rep_add $src/libhello-1.0.0;
$* 2>!;
- $pkg_status libhello >'libhello unpacked 1.0.0'
+ $pkg_status libhello >'libhello unpacked 1.0.0 available 1.0.0#1'
}
: changed-external
@@ -655,6 +659,9 @@ if! $remote
: git-rep
:
+ : Test that iteration is still incremented when a non-external package
+ : from a git repository is switched to the same unedited external package.
+ :
if ($git_supported && !$remote)
{
rep = $canonicalize([dir_path] $out_git/state0);
@@ -668,7 +675,7 @@ if! $remote
$rep_add $rep/style.git;
$* 2>!;
- $pkg_status style >"style unpacked 1.0.0";
+ $pkg_status style >"style unpacked 1.0.0 available 1.0.0#1";
$pkg_purge style 2>"purged style/1.0.0"
}
diff --git a/tests/rep-fetch/no-cycle/extra/libbar-1.1.0+1.tar.gz b/tests/rep-fetch/no-cycle/extra/libbar-1.1.0+1.tar.gz
index 890e9e2..8c5c1ee 100644
--- a/tests/rep-fetch/no-cycle/extra/libbar-1.1.0+1.tar.gz
+++ b/tests/rep-fetch/no-cycle/extra/libbar-1.1.0+1.tar.gz
Binary files differ
diff --git a/tests/rep-fetch/no-cycle/math/libbar-1.0.0.tar.gz b/tests/rep-fetch/no-cycle/math/libbar-1.0.0.tar.gz
index 97e6e32..0697d84 100644
--- a/tests/rep-fetch/no-cycle/math/libbar-1.0.0.tar.gz
+++ b/tests/rep-fetch/no-cycle/math/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-fetch/no-cycle/stable/libfoo-1.0.0.tar.gz b/tests/rep-fetch/no-cycle/stable/libfoo-1.0.0.tar.gz
index 5e7fa17..7cb8934 100644
--- a/tests/rep-fetch/no-cycle/stable/libfoo-1.0.0.tar.gz
+++ b/tests/rep-fetch/no-cycle/stable/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-info.testscript b/tests/rep-info.testscript
index 8ea0e5b..2c02c8f 100644
--- a/tests/rep-info.testscript
+++ b/tests/rep-info.testscript
@@ -13,6 +13,8 @@
# | |-- foo-1.tar.gz
# | `-- repositories.manifest
# |
+# |-- t15 (see pkg-build for details)
+# |
# `-- git
# |-- libbar.git -> style-basic.git (prerequisite)
# `-- style-basic.git
@@ -20,7 +22,7 @@
# Prepare repositories used by tests if running in the local mode.
#
+if! $remote
- rc = $rep_create 2>!
+ rc = [cmdline] $rep_create 2>!
# Create the unsigned 'testing' repository.
#
@@ -35,6 +37,11 @@
$rc --key $key $out/signed &$out/signed/packages.manifest \
&$out/signed/signature.manifest
+ # Create the compatibility repository.
+ #
+ cp -r $src/t15 $out/compatibility
+ $rc $out/compatibility &$out/compatibility/packages.manifest --ignore-unknown
+
# Create git repositories.
#
$git_extract $src/git/libbar.tar
@@ -109,8 +116,12 @@ $* --name $rep/testing >"pkg:build2.org/rep-info/testing ($rep/testing)"
license: MIT
url: http://www.example.org/foo
email: foo-users@example.org
+ bootstrap-build:\
+ project = foo
+
+ \
location: foo-1.tar.gz
- sha256sum: fee330a362a4f87ff42a954aa305b6446d541b7b60000ebcd2fbf68f2b1ae58e
+ sha256sum: 1d88df336611286cdbd84f5c1d87bedc774bc833e200de675e34d9b219c66cfc
EOO
: deep
@@ -121,6 +132,10 @@ $* --name $rep/testing >"pkg:build2.org/rep-info/testing ($rep/testing)"
: dir
:
{
+ # Note that on Windows we still use tar rather than bsdtar here, since
+ # the later fails for dangling symlinks and we have such symlinks in
+ # this repository archive.
+ #
tar ($posix ? : --force-local) -xf $src/git/libbar.tar &state0/***;
$* --type dir "state0/libbar.git" >>~%EOO%d;
@@ -129,12 +144,21 @@ $* --name $rep/testing >"pkg:build2.org/rep-info/testing ($rep/testing)"
version: 1.0.0+1
summary: libbar
license: MIT
- description: \
+ description:\
TODO
\
description-type: text/plain
%.+
+ bootstrap-build:\
+ project = libbar
+
+ using config
+ using version
+ using dist
+
+ \
+ %.+
EOO
rm state0/libbar.git/libbar/README;
@@ -158,23 +182,41 @@ $* --name $rep/testing >"pkg:build2.org/rep-info/testing ($rep/testing)"
version: 1.0.0+1
summary: libbar
license: MIT
- description: \
+ description:\
TODO
\
description-type: text/plain
%.+
+ bootstrap-build:\
+ project = libbar
+
+ using config
+ using version
+ using dist
+
+ \
+ %.+
:
name: libmbar
version: 1.0.0
summary: libmbar
license: MIT
- description: \
+ description:\
TODO
\
description-type: text/plain
%.+
+ bootstrap-build:\
+ project = libmbar
+
+ using config
+ using version
+ using dist
+
+ \
+ %.+
EOO
}
}
@@ -203,8 +245,12 @@ $* --name $rep/testing >"pkg:build2.org/rep-info/testing ($rep/testing)"
license: MIT
url: http://www.example.org/foo
email: foo-users@example.org
+ bootstrap-build:\
+ project = foo
+
+ \
location: foo-1.tar.gz
- sha256sum: fee330a362a4f87ff42a954aa305b6446d541b7b60000ebcd2fbf68f2b1ae58e
+ sha256sum: 1d88df336611286cdbd84f5c1d87bedc774bc833e200de675e34d9b219c66cfc
EOO
}
}
@@ -397,3 +443,60 @@ else
EOO
}
}
+
+: compatibility
+:
+{
+ : packages
+ :
+ {
+ $* --packages $rep/compatibility >>EOO
+
+ libbar/1.0.0
+ libbaz/1.0.0
+ libbiz/1.0.0
+ libfoo/1.0.0
+ EOO
+ }
+
+ : package-manifests-ignore-toolchain
+ :
+ {
+ $* --packages --manifest --ignore-unknown $rep/compatibility >>~%EOO%
+ : 1
+ name: libbar
+ version: 1.0.0
+ %.+
+ depends: * build2 >= 0.16.0
+ depends: * bpkg >= 0.16.0
+ %.+
+ :
+ name: libbaz
+ version: 1.0.0
+ summary: libbaz
+ %.+
+ depends: * build2 >= 65536.0.0
+ depends: * bpkg >= 65536.0.0
+ %.+
+ :
+ name: libbiz
+ version: 1.0.0
+ summary: libbiz
+ %.+
+ :
+ name: libfoo
+ version: 1.0.0
+ summary: libfoo
+ %.+
+ EOO
+ }
+
+ : package-manifests-fail
+ :
+ {
+ $* --packages --manifest $rep/compatibility 2>>~%EOE% != 0
+ error: unable to satisfy constraint (build2 >= 65536.0.0) for package libbaz
+ % info: available build2 version is .+%
+ EOE
+ }
+}
diff --git a/tests/rep-info/t15 b/tests/rep-info/t15
new file mode 120000
index 0000000..c7ad857
--- /dev/null
+++ b/tests/rep-info/t15
@@ -0,0 +1 @@
+../common/compatibility/t15 \ No newline at end of file
diff --git a/tests/rep-list.testscript b/tests/rep-list.testscript
index 3e9cfb7..50a6501 100644
--- a/tests/rep-list.testscript
+++ b/tests/rep-list.testscript
@@ -37,7 +37,7 @@
cp -r $src/stable $out/stable
cp -r $src/testing $out/testing
- c = $rep_create 2>!
+ c = [cmdline] $rep_create 2>!
$c $out/extra &$out/extra/packages.manifest
$c $out/math &$out/math/packages.manifest
diff --git a/tests/rep-list/extra/libbar-1.1.0+1.tar.gz b/tests/rep-list/extra/libbar-1.1.0+1.tar.gz
index 890e9e2..b761fa7 100644
--- a/tests/rep-list/extra/libbar-1.1.0+1.tar.gz
+++ b/tests/rep-list/extra/libbar-1.1.0+1.tar.gz
Binary files differ
diff --git a/tests/rep-list/math/libbar-1.0.0.tar.gz b/tests/rep-list/math/libbar-1.0.0.tar.gz
index 97e6e32..425315a 100644
--- a/tests/rep-list/math/libbar-1.0.0.tar.gz
+++ b/tests/rep-list/math/libbar-1.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-list/stable/libfoo-1.0.0.tar.gz b/tests/rep-list/stable/libfoo-1.0.0.tar.gz
index 5e7fa17..1b030ab 100644
--- a/tests/rep-list/stable/libfoo-1.0.0.tar.gz
+++ b/tests/rep-list/stable/libfoo-1.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-list/testing/libbar-2.0.0.tar.gz b/tests/rep-list/testing/libbar-2.0.0.tar.gz
index 6cc5890..55cd8bd 100644
--- a/tests/rep-list/testing/libbar-2.0.0.tar.gz
+++ b/tests/rep-list/testing/libbar-2.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-remove.testscript b/tests/rep-remove.testscript
index e6f3d91..92a974b 100644
--- a/tests/rep-remove.testscript
+++ b/tests/rep-remove.testscript
@@ -41,7 +41,7 @@
cp -r $src/testing $out/testing
cp -r $src/alpha $out/alpha
- c = $rep_create 2>!
+ c = [cmdline] $rep_create 2>!
$c $out/extra &$out/extra/packages.manifest
$c $out/math &$out/math/packages.manifest
diff --git a/tests/rep-remove/alpha/libbar-2.0.0.tar.gz b/tests/rep-remove/alpha/libbar-2.0.0.tar.gz
index 6cc5890..576b562 100644
--- a/tests/rep-remove/alpha/libbar-2.0.0.tar.gz
+++ b/tests/rep-remove/alpha/libbar-2.0.0.tar.gz
Binary files differ
diff --git a/tests/rep-remove/testing/libbar-2.0.0.tar.gz b/tests/rep-remove/testing/libbar-2.0.0.tar.gz
index 6cc5890..f2d26fe 100644..120000
--- a/tests/rep-remove/testing/libbar-2.0.0.tar.gz
+++ b/tests/rep-remove/testing/libbar-2.0.0.tar.gz
Binary files differ