From 4f090a8ad567dbf22941a7dd2b8005de104aa181 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:27:01 -0400
Subject: [PATCH 01/38] push

---
 .gitlab/bin/push.sh | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/.gitlab/bin/push.sh b/.gitlab/bin/push.sh
index e93101a..c425216 100755
--- a/.gitlab/bin/push.sh
+++ b/.gitlab/bin/push.sh
@@ -8,7 +8,7 @@ set -eu -o pipefail

 readonly APORTSDIR=$CI_PROJECT_DIR
-readonly REPOS="backports user"
+readonly REPOS="ilot"
 readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME

 export GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
@@ -20,19 +20,19 @@ chmod 700 "$HOME"/.ssh/id_rsa
 chmod 700 "$HOME"/.abuild/$ABUILD_KEY_NAME.rsa
 echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" > $HOME/.abuild/abuild.conf
-echo "REPODEST=$HOME/repo-apk" >> $HOME/.abuild/abuild.conf
+echo "REPODEST=$HOME/apk" >> $HOME/.abuild/abuild.conf
 sudo cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/.

-if [ -d $HOME/repo-apk ]; then
-	git -C $HOME/repo-apk fetch
-	git -C $HOME/repo-apk checkout $BASEBRANCH
-	git -C $HOME/repo-apk pull --rebase
+if [ -d $HOME/apk ]; then
+	git -C $HOME/apk fetch
+	git -C $HOME/apk checkout $BASEBRANCH
+	git -C $HOME/apk pull --rebase
 else
-	git clone git@lab.ilot.io:ayakael/repo-apk -b $BASEBRANCH $HOME/repo-apk
+	git clone git@lab.ilot.io:ilot/apk -b $BASEBRANCH $HOME/apk
 fi

 for i in $(find packages -type f -name "*.apk"); do
-	install -vDm644 $i ${i/packages/$HOME\/repo-apk}
+	install -vDm644 $i ${i/packages/$HOME\/apk}
 done

 fetch_flags="-qn"
 git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
 	"+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH"

 for repo in $(changed_repos); do
-	rm $HOME/repo-apk/$repo/*/APKINDEX.tar.gz | true
+	rm $HOME/apk/$repo/*/APKINDEX.tar.gz || true
 	mkdir -p $repo/DUMMY
 	echo "pkgname=DUMMY" > $repo/DUMMY/APKBUILD
 	cd $repo/DUMMY
-	for i in $(find $HOME/repo-apk/$repo -maxdepth 1 -mindepth 1 -printf '%P '); do
+	for i in $(find $HOME/apk/$repo -maxdepth 1 -mindepth 1 -printf '%P '); do
 		CHOST=$i abuild index
 	done
 	cd "$CI_PROJECT_DIR"
 	rm -R $repo/DUMMY
 done

-git -C $HOME/repo-apk add .
-git -C $HOME/repo-apk commit -m "Update from $CI_MERGE_REQUEST_IID - $CI_MERGE_REQUEST_TITLE"
-git -C $HOME/repo-apk push
+git -C $HOME/apk add .
+git -C $HOME/apk commit -m "Update from $CI_MERGE_REQUEST_IID - $CI_MERGE_REQUEST_TITLE" +git -C $HOME/apk push From 649649303004a6ca85dc054cc3a0f753a336f66e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:27:50 -0400 Subject: [PATCH 02/38] archives/gitaly: new aport --- archives/gitaly/APKBUILD | 86 ++++++++++++++++++++++++++++++++++ archives/gitaly/config.patch | 91 ++++++++++++++++++++++++++++++++++++ archives/gitaly/gitaly.initd | 39 ++++++++++++++++ 3 files changed, 216 insertions(+) create mode 100644 archives/gitaly/APKBUILD create mode 100644 archives/gitaly/config.patch create mode 100644 archives/gitaly/gitaly.initd diff --git a/archives/gitaly/APKBUILD b/archives/gitaly/APKBUILD new file mode 100644 index 0000000..1b400ca --- /dev/null +++ b/archives/gitaly/APKBUILD @@ -0,0 +1,86 @@ +# Maintainer: Antoine Martin (ayakael) +# Contributor: Antoine Martin (ayakael) +# Contributor: Jakub Jirutka +pkgname=gitaly +pkgver=17.0.4 +pkgrel=0 +pkgdesc="A Git RPC service for handling all the git calls made by GitLab" +url="https://gitlab.com/gitlab-org/gitaly/" +arch="all" +# GPL-2.0-only WITH GCC-exception-2.0: bundled libgit2 +license="MIT AND GPL-2.0-only WITH GCC-exception-2.0" +depends=" + git>=2.42 + " +makedepends=" + bash + cmake + go + icu-dev + libssh2-dev + libxml2-dev + libxslt-dev + " +subpackages=" + $pkgname-backup + $pkgname-blackbox + $pkgname-praefect + $pkgname-openrc + " +source="https://gitlab.com/gitlab-org/gitaly/-/archive/v$pkgver/gitaly-v$pkgver.tar.gz + config.patch + $pkgname.initd + " +builddir="$srcdir/$pkgname-v$pkgver" +options="!check" + +build() { + make V=1 BUILD_TAGS="tracer_static tracer_static_jaeger" +} + +package() { + ## Go part + + make install DESTDIR="$pkgdir" PREFIX=/usr + + # Not very useful for us. + rm "$pkgdir"/usr/bin/gitaly-debug + rm "$pkgdir"/usr/bin/gitaly-wrapper + + install -m644 -D config.toml.example "$pkgdir"/etc/gitlab/gitaly.toml + install -m644 -D config.praefect.toml.example "$pkgdir"/etc/gitlab/praefect.toml + install -m644 -D cmd/gitaly-blackbox/config.toml.example "$pkgdir"/etc/gitlab/gitaly-blackbox.toml + + install -m755 -D "$srcdir"/gitaly.initd "$pkgdir"/etc/init.d/gitlab.gitaly +} + +backup() { + pkgdesc="Utility used by the backup Rake task to create/restore repository backups from Gitaly" + depends="" + + amove usr/bin/gitaly-backup +} + +# TODO: Add init script. +blackbox() { + pkgdesc="Prometheus exporter that measures GitLab server performance by performing a Git HTTP clone" + depends="" + + amove etc/gitlab/gitaly-blackbox.toml + amove usr/bin/gitaly-blackbox +} + +# TODO: Add init script. 
+praefect() {
+	pkgdesc="A reverse-proxy for Gitaly to manage a cluster of Gitaly nodes for HA"
+	depends=""
+
+	amove etc/gitlab/praefect.toml
+	amove usr/bin/praefect
+}
+
+sha512sums="
+2d06498c519c20804dd592cac3214cf8124ece1dda0d15342f8ccc6d9c9d2715dad24f9940e4d87b824320483c9882004bcef3747a8de347c1d48ec983a9f5cb  gitaly-v17.0.4.tar.gz
+7685330e637c3a34db941c9e6b8776d0611ec16297e8be998a3eb4716c455d9f015d433a4d27720c24e520d489dd56bdab7c0e4264f2852b4b0bfd6ecaa7f773  config.patch
+c32105d921be16eaf559cf21d6840bc346cd92b5e37974cedecdb5a2d2ca1eb5e8fbb144f5fc8a1289bf9415102b313cf2d61ee510c80f08ab33a799f5ac7122  gitaly.initd
+"
diff --git a/archives/gitaly/config.patch b/archives/gitaly/config.patch
new file mode 100644
index 0000000..9df7db0
--- /dev/null
+++ b/archives/gitaly/config.patch
@@ -0,0 +1,91 @@
+diff --git a/config.toml.example.orig b/config.toml.example
+index 82b8502..9982087 100644
+--- a/config.toml.example.orig
++++ b/config.toml.example
+@@ -2,19 +2,24 @@
+ # For Gitaly documentation, see https://docs.gitlab.com/ee/administration/gitaly/.
+
+ # A path which Gitaly should open a Unix socket.
+-socket_path = "/home/git/gitlab/tmp/sockets/private/gitaly.socket"
++socket_path = "/run/gitlab/gitaly.socket"
+
+ # Directory containing Gitaly executables.
+-bin_dir = "/home/git/gitaly/_build/bin"
++bin_dir = "/usr/bin"
+
+ # # Optional. The directory where Gitaly can create all files required to
+ # # properly operate at runtime. If not set, Gitaly will create a directory in
+ # # the global temporary directory. This directory must exist.
+-# runtime_dir = "/home/git/gitaly/run"
++runtime_dir = "/run/gitaly"
+
+ # # Optional if socket_path is set. TCP address for Gitaly to listen on. This is insecure (unencrypted connection).
+ # listen_addr = "localhost:9999"
+
++# # Optional: configure where the Gitaly creates the sockets for internal connections. If unset, Gitaly will create a randomly
++# # named temp directory each time it boots.
++# # Non Gitaly clients should never connect to these sockets.
++internal_socket_dir = "/run/gitaly/internal"
++
+ # # Optional. TCP over TLS address for Gitaly to listen on.
+ # tls_listen_addr = "localhost:8888"
+
+@@ -35,9 +40,9 @@ bin_dir = "/home/git/gitaly/_build/bin"
+ # # Gitaly supports TLS encryption. You must bring your own certificates because this isn’t provided automatically.
+ # [tls]
+ # # Path to the certificate.
+-# certificate_path = '/home/git/cert.cert'
++# certificate_path = '/etc/gitlab/ssl/gitaly.crt'
+ # # Path to the key.
+-# key_path = '/home/git/key.pem'
++# key_path = '/etc/gitlab/ssl/gitaly.key'
+
+ # # Git settings
+ # [git]
+@@ -58,7 +63,7 @@ bin_dir = "/home/git/gitaly/_build/bin"
+ # # The name of the storage
+ name = "default"
+ # # The path to the storage.
+-path = "/home/git/repositories"
++path = "/var/lib/gitlab/repositories"
+
+ # # You can optionally configure more storages for this Gitaly instance to serve up
+ #
+@@ -70,12 +75,12 @@ path = "/home/git/repositories"
+ # # Optional. Configure Gitaly to output JSON-formatted log messages to stdout.
+ # [logging]
+ # # Directory where Gitaly stores extra log files.
+-dir = "/home/git/gitlab/log"
++dir = "/var/log/gitlab"
+ # # Log format. Either 'text' or 'json'.
+-# format = "json"
++format = "text"
+ # # Optional. Set log level to only log entries with that severity or above.
+ # # Valid values are, in order, 'debug', 'info', 'warn', 'error', 'fatal', and 'panic'. Defaults to 'info'.
+-# level = "warn"
++level = "warn"
+ # # Additionally, exceptions from the Go server can be reported to Sentry. Sentry DSN (Data Source Name)
+ # # for exception monitoring.
+ # sentry_dsn = "https://<key>:<secret>@sentry.io/<project>"
+@@ -91,18 +96,18 @@ sentry_environment = ""
+ # # Custom Git hooks that are used to perform tasks based on changes performed in any repository.
+ [hooks]
+ # # Directory where custom Git hooks are installed. If left unset, no custom hooks are used.
+-custom_hooks_dir = "/home/git/custom_hooks"
++custom_hooks_dir = "/etc/gitlab/custom_hooks"
+
+ # # Gitaly must connect to the GitLab application to perform access checks when a user performs a change.
+ [gitlab]
+ # # URL of the GitLab server.
+-url = "http+unix://%2Fhome%2Fgit%2Fgitlab%2Ftmp%2Fsockets%2Fgitlab-workhorse.socket"
++url = "http+unix://%2Frun%2Fgitlab%2Fworkhorse.socket"
+ # # 'relative_url_root' is only needed if a UNIX socket is used in 'url' and GitLab is configured to
+ # # use a relative path. For example, '/gitlab'.
+ # relative_url_root = '/'
+ # # Path of the file containing the secret token used to authenticate with GitLab. Use either 'secret_token' or 'secret'
+ # # but not both.
+-secret_file = "/home/git/gitlab-shell/.gitlab_shell_secret"
++secret_file = "/etc/gitlab/gitlab_shell_secret"
+ # # Secret token used to authenticate with GitLab.
+ # secret = ""
+
diff --git a/archives/gitaly/gitaly.initd b/archives/gitaly/gitaly.initd
new file mode 100644
index 0000000..290c922
--- /dev/null
+++ b/archives/gitaly/gitaly.initd
@@ -0,0 +1,39 @@
+#!/sbin/openrc-run
+
+name="Gitaly"
+description="A Git RPC service for handling all the git calls made by GitLab"
+
+: ${gitaly_config:="/etc/gitlab/gitaly.toml"}
+: ${gitaly_logfile:="/var/log/gitlab/gitaly.log"}
+
+command="/usr/bin/gitaly"
+command_args="$gitaly_config"
+command_background="yes"
+command_user="git"
+
+output_log="$gitaly_logfile"
+error_log="$gitaly_logfile"
+pidfile="/run/gitaly.pid"
+supervise_daemon_args="--env TZ=:/etc/localtime"
+start_stop_daemon_args="$supervise_daemon_args"
+
+rc_ulimit="-n 15000"
+
+required_files="$gitaly_config"
+
+depend() {
+	use net
+}
+
+start_pre() {
+	local socket_path=$(sed -En "s/^\s*socket_path\s*=\s*[\"']([^\"']+)[\"']/\1/p" "$gitaly_config")
+	local runtime_dir=$(sed -En "s/^\s*runtime_dir\s*=\s*[\"']([^\"']+)[\"']/\1/p" "$gitaly_config")
+
+	if [ "$socket_path" ]; then
+		checkpath -q -d -m 755 -o $command_user "${socket_path%/*}" || return 1
+	fi
+	if [ "$runtime_dir" ]; then
+		checkpath -q -d -m 750 -o $command_user "$runtime_dir" || return 1
+	fi
+	checkpath -f -m 640 -o $command_user "$gitaly_logfile"
+}

From 8403bcb5345aa66a154089454b573576a516091e Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:27:52 -0400
Subject: [PATCH 03/38] archives/gitlab-foss: new aport

---
 archives/gitlab-foss/APKBUILD                 | 375 ++++++++++++++++++
 archives/gitlab-foss/bin-wrapper.in           |  15 +
 archives/gitlab-foss/database-config.patch    |  66 +++
 archives/gitlab-foss/gitlab-foss.post-install | 108 +++++
 archives/gitlab-foss/gitlab-foss.post-upgrade |   1 +
 archives/gitlab-foss/gitlab-foss.pre-install  |  53 +++
 archives/gitlab-foss/gitlab-rails.confd       |  20 +
 archives/gitlab-foss/gitlab.confd             |  85 ++++
 archives/gitlab-foss/gitlab.initd             |  50 +++
 archives/gitlab-foss/gitlab.logrotate         |  24 ++
 archives/gitlab-foss/gitlab.mailroom.initd    |  40 ++
 archives/gitlab-foss/gitlab.rails.initd       | 119 ++++++
 archives/gitlab-foss/gitlab.sidekiq.initd     |  76 ++++
 archives/gitlab-foss/gitlab.workhorse.initd   |  75 ++++
 .../upgrade-sys-filesystem-depend.patch       |  35 ++
 15 files changed, 1142 insertions(+)
 create mode 100644 archives/gitlab-foss/APKBUILD
 create mode 100644 archives/gitlab-foss/bin-wrapper.in
 create mode 100644 archives/gitlab-foss/database-config.patch
 create mode 100644 archives/gitlab-foss/gitlab-foss.post-install
 create mode 120000 archives/gitlab-foss/gitlab-foss.post-upgrade
 create mode 100644 archives/gitlab-foss/gitlab-foss.pre-install
 create mode 100644 archives/gitlab-foss/gitlab-rails.confd
 create mode 100644 archives/gitlab-foss/gitlab.confd
 create mode 100644 archives/gitlab-foss/gitlab.initd
 create mode 100644 archives/gitlab-foss/gitlab.logrotate
 create mode 100644 archives/gitlab-foss/gitlab.mailroom.initd
 create mode 100644 archives/gitlab-foss/gitlab.rails.initd
 create mode 100644 archives/gitlab-foss/gitlab.sidekiq.initd
 create mode 100644 archives/gitlab-foss/gitlab.workhorse.initd
 create mode 100644 archives/gitlab-foss/upgrade-sys-filesystem-depend.patch

diff --git a/archives/gitlab-foss/APKBUILD b/archives/gitlab-foss/APKBUILD
new file mode 100644
index 0000000..21331a4
--- /dev/null
+++ b/archives/gitlab-foss/APKBUILD
@@ -0,0 +1,375 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Jakub Jirutka
+# Contributor: Antoine Martin (ayakael)
+pkgname=gitlab-foss
+_pkgname=${pkgname%-foss}
+pkgver=17.0.4
+_gittag=v$pkgver
+pkgrel=0
+pkgdesc="A version control system for your server"
+url="https://gitlab.com/gitlab-org/gitlab-foss"
+arch="x86_64 aarch64"
+license="MIT"
+# ruby-irb is needed only for Rails console (gitlab-rails console)
+depends="
+	$pkgname-assets=$pkgver-r$pkgrel
+	ca-certificates
+	cmd:dpkg-deb
+	exiftool
+	git>=2.42.0
+	gitaly~=17.0
+	gitlab-shell>=14.35
+	graphicsmagick
+	http-parser
+	procps
+	py-docutils
+	python3
+	redis>=2.8
+	ruby3.2
+	ruby3.2-bigdecimal
+	ruby3.2-bundler
+	ruby3.2-fiddle
+	ruby3.2-io-console
+	ruby3.2-irb
+	ruby3.2-json
+	ruby3.2-rake
+	ruby3.2-rdoc
+	ruby3.2-webrick
+	shared-mime-info
+	tzdata
+	"
+makedepends="
+	cargo
+	clang-dev
+	cmd:chrpath
+	cmake
+	file-dev
+	go
+	gpgme-dev
+	icu-dev
+	libffi-dev
+	libgcrypt-dev
+	libpq-dev
+	libxml2-dev
+	libxslt-dev
+	linux-headers
+	llvm
+	nodejs
+	openssl-dev
+	protobuf-dev
+	re2-dev
+	ruby3.2-dev
+	rust
+	yarn>=1.2.0
+	"
+pkgusers="git"
+pkggroups="git www-data"
+install="$pkgname.pre-install $pkgname.post-install $pkgname.post-upgrade"
+subpackages="$pkgname-assets::noarch $pkgname-openrc"
+source="https://gitlab.com/gitlab-org/gitlab-foss/-/archive/$_gittag/gitlab-foss-$_gittag.tar.gz
+	database-config.patch
+	$_pkgname.initd
+	$_pkgname.mailroom.initd
+	$_pkgname.rails.initd
+	$_pkgname.sidekiq.initd
+	$_pkgname.workhorse.initd
+	$_pkgname.confd
+	$_pkgname.logrotate
+	bin-wrapper.in
+	upgrade-sys-filesystem-depend.patch
+	"
+builddir="$srcdir/gitlab-foss-$_gittag"
+
+_prefix="usr/lib/bundles/$_pkgname"
+
+export BUNDLE_DEPLOYMENT=true
+export BUNDLE_FORCE_RUBY_PLATFORM=true
+export BUNDLE_FROZEN=true
+# Should be tied to $JOBS, but rust native code fails to build
+export BUNDLE_JOBS=1
+
+prepare() {
+	default_prepare
+
+	# The default log level is very chatty.
+	sed -i 's/^\(\s*config.log_level\s*=\).*$/\1 :warn/' \
+		config/environments/production.rb
+
+	# This is not needed, the secret_token is generated by the
+	# gitlab-shell package. It also causes problems in the build phase.
+	rm config/initializers/gitlab_shell_secret_token.rb
+
+	# Remove all locale files except en.
+	find locale -type d -mindepth 1 !
-name en -exec rm -rf {} +
+
+	# Allow use of any bundler
+	sed -i -e '/BUNDLED/,+1d' Gemfile.lock
+}
+
+build() {
+	local bundle_without='exclude development kerberos mysql test'
+
+	cd "$builddir"/workhorse
+
+	make
+
+	cd "$builddir"
+
+	msg "Installing Ruby gems..."
+	bundle config --local without "$bundle_without"
+	bundle config --local build.ffi --enable-system-libffi
+	bundle config --local build.gpgme --use-system-libraries
+	bundle config --local build.re2 --enable-system-libraries
+	bundle config --local build.nokogiri --use-system-libraries \
+		--with-xml2-include=/usr/include/libxml2 \
+		--with-xslt-include=/usr/include/libxslt
+	bundle config --local build.ruby-magic --enable-system-libraries
+	bundle config --local build.google-protobuf '-- --with-cflags=-D__va_copy=va_copy'
+	bundle config --local path "vendor/bundle"
+
+	bundle install --no-cache
+
+	# Replace bundled CA bundle with symlink.
+	(
+		cd vendor/bundle/ruby/*/gems/aws-sdk-core-*/
+		rm ca-bundle.crt
+		ln -s /etc/ssl/certs/ca-certificates.crt ca-bundle.crt
+	)
+
+	# Remove faulty RPATH.
+	chrpath -d vendor/bundle/ruby/*/extensions/*/*/ruby-magic-*/magic/magic.so
+
+	# Patch installed gem gitlab-markup to use python3.
+	# Option "-S" prevents Python from finding the docutils module.
+	sed -i 's/python2 -S/python3/g' \
+		vendor/bundle/ruby/*/gems/gitlab-markup-*/lib/github/markups.rb
+
+	# Remove nonsense require of test code from the top-level module
+	# (we're gonna delete tests from the package).
+	sed -i '/require .carrierwave\/test\/matchers./d' \
+		vendor/bundle/ruby/*/gems/carrierwave-*/lib/carrierwave.rb
+
+	msg "Installing npm modules..."
+	yarn install --production --frozen-lockfile
+
+	# Since we have moved assets gems into a group, they are not implicitly
+	# loaded by default. This will be reverted after compiling assets.
+	sed -i.bak '/Bundler.require(\*Rails.groups/s/)/, :assets)/' \
+		config/application.rb
+
+	# assets:precompile and gettext:compile bootstrap the app,
+	# so they need configs.
+	cp config/gitlab.yml.example config/gitlab.yml
+	cp config/database.yml.postgresql config/database.yml
+	cp config/secrets.yml.example config/secrets.yml
+
+	# The configured path is not readable for the user building
+	# the package, so we must remove it; GitLab will use the default path.
+	sed -i '/^\s*secret_file:.*/d' config/gitlab.yml
+
+	(
+		export NODE_ENV=production
+		export RAILS_ENV=production
+		export SKIP_STORAGE_VALIDATION=true
+		export USE_DB=false
+		export NO_SOURCEMAPS=true
+		export NODE_OPTIONS="--max_old_space_size=3584"
+
+		msg "Compiling GetText PO files..."
+		bundle exec rake gettext:compile
+
+		msg "Compiling assets (this will take a few minutes)..."
+		bundle exec rake gitlab:assets:compile
+	)
+
+	# Revert changes.
+	mv config/application.rb.bak config/application.rb
+
+	msg "Cleaning assets gems..."
+	bundle config --local without 'exclude development kerberos mysql test assets'
+	bundle clean
+
+	# Create executables in bin/*.
+	# See also https://github.com/bundler/bundler/issues/6149.
+	bundle binstubs --force bundler gitlab-mail_room puma sidekiq
+
+	# Cleanup
+	rm config/database.yml config/gitlab.yml config/secrets.yml
+}
+
+package() {
+	local destdir="$pkgdir/$_prefix"
+	local datadir="$pkgdir/var/lib/gitlab"
+	local file dest
+
+	install -d -m755 "$destdir" "$destdir"/bin
+
+	install -d -m755 -o git -g git \
+		"$datadir" \
+		"$pkgdir"/etc/gitlab \
+		"$pkgdir"/var/log/gitlab \
+		"$datadir"/pages
+
+	install -d -m700 -o git -g git \
+		"$datadir"/artifacts \
+		"$datadir"/builds \
+		"$datadir"/ci_secure_files \
+		"$datadir"/dependency_proxy \
+		"$datadir"/encrypted_settings \
+		"$datadir"/external-diffs \
+		"$datadir"/lfs-objects \
+		"$datadir"/packages \
+		"$datadir"/pages \
+		"$datadir"/terraform_state \
+		"$datadir"/uploads
+
+	install -d -m0750 -o git -g www-data \
+		"$datadir"/pages
+
+	install -d -m02770 -o git -g git \
+		"$datadir"/repositories
+
+	# Install application files.
+	# Note: *VERSION files and doc directory are required (Help in GitLab
+	# menu refers to the doc directory).
+	cp -rl .bundle config.ru Gemfile* INSTALLATION_TYPE Rakefile ./*VERSION \
+		app data db doc fixtures config lib locale metrics_server public sidekiq_cluster vendor gems \
+		"$destdir"/
+
+	install -m755 -t "$destdir"/bin/ \
+		bin/bundle \
+		bin/mail_room \
+		bin/metrics-server \
+		bin/rails \
+		bin/rake \
+		bin/sidekiq \
+		bin/sidekiq-cluster \
+		bin/sidekiqmon \
+		bin/puma
+
+	cd "$destdir"
+
+	# Not needed at runtime since we have already compiled all assets.
+	rm -r app/assets
+	rm -r vendor/assets
+	find public/assets -name '*.vue' -delete
+	find public/assets -type d -exec rmdir --ignore-fail-on-non-empty '{}' \;
+	# These load gems in the assets group.
+	rm config/initializers/sprockets.rb
+
+	# Remove more stuff not needed in production.
+	rm -r lib/support
+	rm -r db/fixtures/development
+	find lib/tasks -maxdepth 1 -type f ! -name cache.rake ! -name setup.rake -delete
+	find lib/tasks/gitlab \( -name 'generate_docs.*' \
+		-o -name 'shell.*' \
+		-o -name 'test.*' \) -delete
+
+
+	cd "$destdir"/vendor/bundle/ruby/*/
+
+	# Remove tests, documentation and other useless files.
+	find gems/ \( -name 'doc' \
+		-o -name 'spec' \
+		-o -name 'test' \) \
+		-type d -maxdepth 2 -exec rm -fr "{}" +
+	find gems/ \( -name 'README*' \
+		-o -name 'CHANGELOG*' \
+		-o -name 'CONTRIBUT*' \
+		-o -name '*LICENSE*' \
+		-o -name 'Rakefile' \
+		-o -name '.*' \) \
+		-type f -delete
+
+	# Remove bundled libgit2 sources.
+	rm -r gems/rugged-*/vendor/libgit2
+
+	# Remove assets, they are already compiled.
+	rm -r gems/tanuki_emoji-*/app/assets
+
+	# Remove build logs and cache.
+	rm -rf build_info/ cache/
+	find extensions/ \( -name gem_make.out -o -name mkmf.log \) -delete
+
+
+	cd "$destdir"
+
+	# Install and symlink config files.
+	for file in cable.yml.example \
+		database.yml.postgresql \
+		gitlab.yml.example \
+		puma.rb.example \
+		resque.yml.example \
+		sidekiq.yml.example \
+		initializers/smtp_settings.rb.sample
+	do
+		dest="$(basename "${file%.*}")"
+		install -m640 -g git -D config/$file "$pkgdir"/etc/gitlab/$dest
+		ln -sf /etc/gitlab/$dest "$pkgdir"/$_prefix/config/${file%.*}
+	done
+
+	# This file will be generated by the post-install script, just prepare the symlink.
+	ln -sf /etc/gitlab/secrets.yml config/secrets.yml
+	# These shouldn't be necessary, they are all configurable, but Omnibus
+	# creates them too, so just to be sure...
+ ln -sf /etc/gitlab/gitlab_kas_secret .gitlab_kas_secret + ln -sf /etc/gitlab/gitlab_pages_secret .gitlab_pages_secret + ln -sf /etc/gitlab/gitlab_shell_secret .gitlab_shell_secret + ln -sf /etc/gitlab/gitlab_workhorse_secret .gitlab_workhorse_secret + + # Some paths are hard-coded in GitLab, so we must make symlinks. :( + ln -sf /var/lib/gitlab/uploads public/uploads + ln -sf /var/log/gitlab log + ln -sf /var/tmp/gitlab tmp + + cat > "$datadir"/.profile <<-EOF + export RAILS_ENV=production + export NODE_ENV=production + export EXECJS_RUNTIME=Disabled + EOF + + # Install wrapper scripts to /usr/bin. + local name; for name in rake rails; do + sed "s/__COMMAND__/$name/g" "$srcdir"/bin-wrapper.in \ + > "$builddir"/gitlab-$name + install -m755 -D "$builddir"/gitlab-$name "$pkgdir"/usr/bin/gitlab-$name + done + + + cd "$builddir"/workhorse + + # Install workhorse. + make install DESTDIR="$pkgdir" PREFIX=/usr + install -m644 config.toml.example "$pkgdir"/etc/gitlab/workhorse.toml + + + for file in $_pkgname $_pkgname.rails $_pkgname.sidekiq $_pkgname.mailroom $_pkgname.workhorse; do + install -m755 -D "$srcdir"/$file.initd "$pkgdir"/etc/init.d/$file + done + + install -m644 -D "$srcdir"/$_pkgname.confd \ + "$pkgdir"/etc/conf.d/$_pkgname + + install -m644 -D "$srcdir"/$_pkgname.logrotate \ + "$pkgdir"/etc/logrotate.d/$_pkgname +} + +assets() { + depends="" + + amove $_prefix/public/assets +} + +sha512sums=" +e09cfbbe4237f42bd8509c551031fd3526b75762beae7dac5164ecc4056ae07890a3ddb8500f1573f0ca9d697150654d1fcab3b3d0a3b93e5382addcee298c5b gitlab-foss-v17.0.4.tar.gz +daa496f3d9146f9dbddff62477bf49d5c7bd2f2a4cdbadc70ee51c8230f3ef01dc950ef157154b31c7e7bef0beecc5cbac50fbac65a79d6d9099b27bcba8b2ab database-config.patch +80d9bf2d064c1d4310566e087e14220e075430c46d9a6c4641c1141fbdc05381ae14a3ae7dfcb7dcb75dbf7af17a136f81764c7a4d109f248a81033782dce23b gitlab.initd +1f451b67a5d5e58650b0fe862a2b65cfb8bff5502b37d94ae90619c1ff9affbecf24428303a2849bebce5f94bef37078f0e5710e344bbab616134e910938384a gitlab.mailroom.initd +d8cdeb54c46f8204936bf5750833649e4586d3dd1942eed45955ed1661ae5f5080f59184fcb59a8f73c1405faccbf02b3db3d2c12fc2a4a81424cd35ce390768 gitlab.rails.initd +cb4ec100f0ea7ffcbb37aead8423e636629e2f4848b2974a7b2468e96cb1081ca732ac336417b08dd943afb961df888c73af1334dcbe054dfd361e74f492fd86 gitlab.sidekiq.initd +85c4e257a030832bd70ad1e257ae7cb568b31e01201fc845abac02d00f02492ca694be1fa2bf743dd8c8623e6a79d36adee3f4de02040134c11158a6001c064b gitlab.workhorse.initd +4dc00b16462f30591297fcb535fc364185d3ed76e9956597f0423a8dfd8a9a351f6ac29d9f0c73052c11324fba4768eb89a21c6bef4da99f15baaea8c9ab8407 gitlab.confd +57f258246925fbef0780caebdf005983c72fe3db1ab3242a1e00137bd322f5ec6c0fd958db7178b8fc22103d071f550d6f71f08422bcd9e859d2a734b2ecef00 gitlab.logrotate +a944c3886388ba1574bf8c96b6de4d9f24ef4a83f553c31a224e17a3b01f2a5c65b60c59b7ed7ca4b25670c60ea8dd41b96a8a623d909d2bb09bdf2520ed7f23 bin-wrapper.in +0eaa7de9a906ddb0fe84b7afbaec893a134bbbdb9e71da75cf4095ef40404643e51447aee88d3cad6e565bc709b34ffd8901cc93061e4a2a410838aed42d3644 upgrade-sys-filesystem-depend.patch +" diff --git a/archives/gitlab-foss/bin-wrapper.in b/archives/gitlab-foss/bin-wrapper.in new file mode 100644 index 0000000..aa1d411 --- /dev/null +++ b/archives/gitlab-foss/bin-wrapper.in @@ -0,0 +1,15 @@ +#!/bin/sh + +BUNDLE_DIR='/usr/lib/bundles/gitlab' +export RAILS_ENV='production' +export NODE_ENV='production' +export EXECJS_RUNTIME='Disabled' + +cd $BUNDLE_DIR +install -m 700 -o git -g git -d "$(readlink ./tmp)" + +if [ "$(id -un)" != 'git' ]; then + exec su 
git -c '"$0" "$@"' -- bin/__COMMAND__ "$@" +else + exec bin/__COMMAND__ "$@" +fi diff --git a/archives/gitlab-foss/database-config.patch b/archives/gitlab-foss/database-config.patch new file mode 100644 index 0000000..9b113e1 --- /dev/null +++ b/archives/gitlab-foss/database-config.patch @@ -0,0 +1,66 @@ +diff --git a/config/database.yml.postgresql.orig b/config/database.yml.postgresql +index da9f458..2d6d44e 100644 +--- a/config/database.yml.postgresql.orig ++++ b/config/database.yml.postgresql +@@ -26,13 +26,6 @@ production: + username: git + password: "secure password" + host: localhost +- geo: +- adapter: postgresql +- encoding: unicode +- database: gitlabhq_geo_production +- username: git +- password: "secure password" +- host: localhost + + # + # Development specific +@@ -57,13 +50,6 @@ development: + host: localhost + variables: + statement_timeout: 15s +- geo: +- adapter: postgresql +- encoding: unicode +- database: gitlabhq_geo_development +- username: postgres +- password: "secure password" +- host: localhost + + # + # Staging specific +@@ -84,13 +70,6 @@ staging: + username: git + password: "secure password" + host: localhost +- geo: +- adapter: postgresql +- encoding: unicode +- database: gitlabhq_geo_staging +- username: git +- password: "secure password" +- host: localhost + + # Warning: The database defined as "test" will be erased and + # re-generated from your development database when you run "rake". +@@ -119,19 +98,3 @@ test: &test + reaping_frequency: nil + variables: + statement_timeout: 15s +- geo: +- adapter: postgresql +- encoding: unicode +- database: gitlabhq_geo_test +- username: postgres +- password: +- host: localhost +- reaping_frequency: nil +- embedding: +- adapter: postgresql +- encoding: unicode +- database: gitlabhq_embedding_test +- username: postgres +- password: +- host: localhost +- reaping_frequency: nil diff --git a/archives/gitlab-foss/gitlab-foss.post-install b/archives/gitlab-foss/gitlab-foss.post-install new file mode 100644 index 0000000..65d05cc --- /dev/null +++ b/archives/gitlab-foss/gitlab-foss.post-install @@ -0,0 +1,108 @@ +#!/bin/sh +set -eu + +group='git' +data_dir='/var/lib/gitlab' +secrets_file='/etc/gitlab/secrets.yml' +shell_secret_file='/etc/gitlab/gitlab_shell_secret' +workhorse_secret_file='/etc/gitlab/gitlab_workhorse_secret' +kas_secret_file='/etc/gitlab/gitlab_kas_secret' + +gen_random_b64() { + local bits="$1" + ruby <<-EOF + require 'securerandom' + require 'base64' + puts Base64.strict_encode64(SecureRandom.random_bytes($bits)) + EOF +} + + +echo "* Checking $secrets_file" >&2 + +ruby <<-EOF + require 'openssl' + require 'securerandom' + require 'yaml' + + secrets_file = '$secrets_file' + changed = false + + secrets = YAML.load_file(secrets_file) if File.exist?(secrets_file) + secrets ||= {} + prod = secrets['production'] ||= {} + prod['db_key_base'] ||= ( changed = true; SecureRandom.hex(64) ) + prod['secret_key_base'] ||= ( changed = true; SecureRandom.hex(64) ) + prod['otp_key_base'] ||= ( changed = true; SecureRandom.hex(64) ) + prod['encrypted_settings_key_base'] ||= ( changed = true; SecureRandom.hex(64) ) + prod['openid_connect_signing_key'] ||= begin + changed = true + prod.delete('jws_private_key') || OpenSSL::PKey::RSA.new(2048).to_pem + end + # db/fixtures/production/010_settings.rb + prod['ci_jwt_signing_key'] ||= ( changed = true; OpenSSL::PKey::RSA.new(2048).to_pem ) + + if changed + STDERR.puts "* Generating random secrets into #{secrets_file}" + File.write(secrets_file, YAML.dump(secrets), mode: 'w', perm: 
0640)
+	end
+EOF
+chown root:$group "$secrets_file"
+
+if [ ! -f "$shell_secret_file" ]; then
+	echo "* Generating random secret in $shell_secret_file" >&2
+
+	head -c 512 /dev/urandom | LC_CTYPE=C tr -cd 'a-zA-Z0-9' | head -c 64 > "$shell_secret_file"
+	chown root:$group "$shell_secret_file"
+	chmod 0640 "$shell_secret_file"
+fi
+
+if [ ! -f "$workhorse_secret_file" ]; then
+	echo "* Generating random secret in $workhorse_secret_file" >&2
+
+	# Sync with lib/gitlab/workhorse.rb.
+	gen_random_b64 32 > "$workhorse_secret_file"
+	chown root:$group "$workhorse_secret_file"
+	chmod 0640 "$workhorse_secret_file"
+fi
+
+if [ ! -f "$kas_secret_file" ]; then
+	echo "* Generating random secret in $kas_secret_file" >&2
+
+	# Sync with lib/gitlab/workhorse.rb.
+	gen_random_b64 32 > "$kas_secret_file"
+	chown root:$group "$kas_secret_file"
+	chmod 0640 "$kas_secret_file"
+fi
+
+# NOTE: We create this symlink in the post-install script instead of the APKBUILD,
+# so the user can decide to have the tmp dir inside $data_dir (e.g. if it's on a bigger disk).
+if [ ! -e "$data_dir"/tmp ]; then
+	ln -s /var/tmp/gitlab "$data_dir"/tmp
+fi
+
+
+if [ "${0##*.}" = 'post-upgrade' ]; then
+	cat >&2 <<-EOF
+	*
+	* To finish the GitLab upgrade, run:
+	*
+	*   gitlab-rake gitlab:db:configure
+	*
+	EOF
+else
+	cat >&2 <<-EOF
+	*
+	* 1. Adjust settings in /etc/gitlab/database.yml and gitlab.yml.
+	*
+	* 2. Create database for GitLab:
+	*
+	*      psql -c "CREATE ROLE gitlab PASSWORD 'top-secret' INHERIT LOGIN;"
+	*      psql -c "CREATE DATABASE gitlab OWNER gitlab ENCODING 'UTF-8';"
+	*      psql -d gitlab -c "CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist;"
+	*
+	* 3. Run "gitlab-rake gitlab:setup", or "gitlab-rake gitlab:db:configure" if
+	*    you are updating an existing database.
+	*
+	EOF
+fi
diff --git a/archives/gitlab-foss/gitlab-foss.post-upgrade b/archives/gitlab-foss/gitlab-foss.post-upgrade
new file mode 120000
index 0000000..20d2b0c
--- /dev/null
+++ b/archives/gitlab-foss/gitlab-foss.post-upgrade
@@ -0,0 +1 @@
+gitlab-foss.post-install
\ No newline at end of file
diff --git a/archives/gitlab-foss/gitlab-foss.pre-install b/archives/gitlab-foss/gitlab-foss.pre-install
new file mode 100644
index 0000000..66ad895
--- /dev/null
+++ b/archives/gitlab-foss/gitlab-foss.pre-install
@@ -0,0 +1,53 @@
+#!/bin/sh
+# It's very important to set user/group correctly.
+
+git_dir='/var/lib/gitlab'
+
+if ! getent group git 1>/dev/null; then
+	echo '* Creating group git' 1>&2
+
+	addgroup -S git
+fi
+
+if ! id git 2>/dev/null 1>&2; then
+	echo '* Creating user git' 1>&2
+
+	adduser -DHS -G git -h "$git_dir" -s /bin/sh \
+		-g "added by apk for gitlab-foss" git
+	passwd -u git 1>/dev/null  # unlock
+fi
+
+if ! id -Gn git | grep -Fq redis; then
+	echo '* Adding user git to group redis' 1>&2
+
+	addgroup git redis
+fi
+
+if [ "$(id -gn git)" != 'git' ]; then
+	cat >&2 <<-EOF
+	!!
+	!! User git has primary group $(id -gn git). We strongly recommend changing
+	!! git's primary group to git, otherwise GitLab may not work correctly.
+	!!
+	EOF
+
+	# Add it at least as a supplementary group.
+	adduser git git
+fi
+
+user_home="$(getent passwd git | cut -d: -f6)"
+
+if [ "$user_home" != "$git_dir" ]; then
+	cat >&2 <<-EOF
+	!!
+	!! User git has home directory in $user_home, but this package assumes
+	!! $git_dir. Although it's possible to use a different directory,
+	!! it's really not easy.
+	!!
+	!! Please change git's home directory to $git_dir, or adjust settings
+	!! and move files yourself. Otherwise GitLab will not work!
+	!!
+	EOF
+fi
+
+exit 0
diff --git a/archives/gitlab-foss/gitlab-rails.confd b/archives/gitlab-foss/gitlab-rails.confd
new file mode 100644
index 0000000..d85aa9c
--- /dev/null
+++ b/archives/gitlab-foss/gitlab-rails.confd
@@ -0,0 +1,20 @@
+# Configuration for /etc/init.d/gitlab.rails
+
+# Path to the Puma configuration file.
+#puma_config="/etc/gitlab/puma.rb"
+
+# IP address and port for Puma server to listen on.
+#puma_listen_tcp="127.0.0.1:8080"
+
+# Absolute path of unix socket for Puma server to listen on.
+#puma_listen_unix="/run/gitlab/gitlab.socket"
+
+# Path to the file to redirect stdout from Puma server to.
+#puma_stdout_file="/var/log/gitlab/puma_stdout.log"
+
+# Path to the file to redirect stderr from Puma server to.
+#puma_stderr_file="/var/log/gitlab/puma_stderr.log"
+
+# Action Cable uses a separate thread pool per Puma worker. This configures
+# number of threads in the pool.
+#action_cable_worker_pool_size=4
diff --git a/archives/gitlab-foss/gitlab.confd b/archives/gitlab-foss/gitlab.confd
new file mode 100644
index 0000000..ade6bcc
--- /dev/null
+++ b/archives/gitlab-foss/gitlab.confd
@@ -0,0 +1,85 @@
+# Configuration file for /etc/init.d/gitlab and
+# /etc/init.d/gitlab.{mailroom,rails,sidekiq,workhorse}
+
+
+# Path to the base directory for the Prometheus metrics used by Puma and
+# Sidekiq.
+#metrics_dir=/dev/shm/gitlab
+
+
+# How many Puma worker processes to create (0 to disable cluster mode).
+#puma_workers=3
+
+# IP address and port for Puma server to listen on.
+#puma_listen_tcp="127.0.0.1:8080"
+
+# Absolute path of unix socket for Puma server to listen on.
+#puma_listen_unix="/run/gitlab/gitlab.socket"
+
+# Action Cable uses a separate thread pool per Puma worker. This configures
+# number of threads in the pool.
+#action_cable_worker_pool_size=4
+
+
+# IP address and port, or absolute path of the unix socket, where Workhorse
+# should listen for connections from a web server.
+#workhorse_listen="/run/gitlab/workhorse.socket"
+
+# How long to wait for response headers when proxying the request.
+#workhorse_proxy_header_timeout="1m0s"
+
+# Number of API requests allowed at a single time.
+#workhorse_api_limit=
+
+# Maximum queueing duration of requests (default 30s).
+#workhorse_api_queue_duration=
+
+# Number of API requests allowed to be queued.
+#workhorse_api_queue_limit=
+
+# Long polling duration for job requesting for runners (default 0s - disabled)
+#workhorse_ci_long_polling_duration=
+
+# Log format to use: text, json, structured, none. Defaults to "text".
+#workhorse_log_format=
+
+# Prometheus listening address.
+#workhorse_prometheus_listen=
+
+# Sentry DSN for Workhorse.
+#workhorse_sentry_dsn=
+
+
+# Specify how many processes to create using sidekiq-cluster and which queue
+# they should handle. Each whitespace-separated item equates to one additional
+# Sidekiq process, and comma-separated values in each item determine the queues
+# it works on. The special queue name "*" means all queues.
+# Example: "* gitlab_shell process_commit,post_receive"
+# See https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html.
+#sidekiq_queue_groups="*"
+
+# Maximum threads to use with Sidekiq (default: 50, 0 to disable).
+#sidekiq_max_concurrency=
+
+# Minimum threads to use with Sidekiq (default: 0).
+#sidekiq_min_concurrency=
+
+# The number of seconds to wait between worker checks.
+#sidekiq_interval=
+
+# Graceful timeout for all running processes.
+#sidekiq_shutdown_timeout= + +# Run workers for all queues in sidekiq_queues.yml except the given ones. +#sidekiq_negate=no + +# Run workers based on the provided selector. +#sidekiq_queue_selector=no + +# Memory limit (in MiB) for the Sidekiq process. If the RSS (Resident Set Size) +# of the Sidekiq process exceeds this limit, a delayed shutdown is triggered. +#sidekiq_memkiller_max_rss=2000 + + +# Enable mail_room to handle incoming mails? +#mailroom_enabled="no" diff --git a/archives/gitlab-foss/gitlab.initd b/archives/gitlab-foss/gitlab.initd new file mode 100644 index 0000000..cdf212e --- /dev/null +++ b/archives/gitlab-foss/gitlab.initd @@ -0,0 +1,50 @@ +#!/sbin/openrc-run + +name="GitLab" +description="Meta script for starting/stopping all the GitLab components" + +: ${mailroom_enabled:="no"} +: ${pages_enabled:="yes"} + +subservices="gitlab.rails gitlab.gitaly gitlab.sidekiq gitlab.workhorse" +if yesno "$mailroom_enabled"; then + subservices="$subservices gitlab.mailroom" +fi +if yesno "$pages_enabled" && [ -e /etc/init.d/gitlab.pages ]; then + subservices="$subservices gitlab.pages" +fi + +depend() { + need redis postgresql + use net +} + +start() { + local ret=0 + + ebegin "Starting all GitLab components" + local svc; for svc in $subservices; do + service $svc start || ret=1 + done + eend $ret +} + +stop() { + local ret=0 + + ebegin "Stopping all GitLab components" + local svc; for svc in $subservices; do + service $svc stop || ret=1 + done + eend $ret +} + +status() { + local ret=0 + + local svc; for svc in $subservices; do + echo "$svc:" + service $svc status || ret=1 + done + eend $ret +} diff --git a/archives/gitlab-foss/gitlab.logrotate b/archives/gitlab-foss/gitlab.logrotate new file mode 100644 index 0000000..721ff49 --- /dev/null +++ b/archives/gitlab-foss/gitlab.logrotate @@ -0,0 +1,24 @@ +/var/log/gitlab/workhorse.log { + compress + maxsize 10M + minsize 1M + missingok + postrotate + /etc/init.d/gitlab.workhorse --quiet --ifstarted reopen + endscript + sharedscripts + rotate 5 + weekly +} + +/var/log/gitlab/*.log { + compress + copytruncate + delaycompress + maxsize 10M + minsize 1M + missingok + sharedscripts + rotate 10 + weekly +} diff --git a/archives/gitlab-foss/gitlab.mailroom.initd b/archives/gitlab-foss/gitlab.mailroom.initd new file mode 100644 index 0000000..e6d6a64 --- /dev/null +++ b/archives/gitlab-foss/gitlab.mailroom.initd @@ -0,0 +1,40 @@ +#!/sbin/openrc-run + +supervisor=supervise-daemon + +name="GitLab (mailroom)" +description="GitLab service for processing incoming mails." 
+ +: ${gitlab_base:="/usr/lib/bundles/gitlab"} +: ${gitlab_config:="/etc/gitlab/gitlab.yml"} +: ${mailroom_logfile:="/var/log/gitlab/mail_room.log"} +: ${mailroom_config:="$gitlab_base/config/mail_room.yml"} + +command="$gitlab_base/bin/mail_room" +command_args="-c $mailroom_config" +command_background="yes" +command_user="git" + +directory="$gitlab_base" +error_log="$mailroom_logfile" +output_log="$mailroom_logfile" + +supervise_daemon_args=" + --env RAILS_ENV=production + --env TZ=:/etc/localtime + --env MAIL_ROOM_GITLAB_CONFIG_FILE=$gitlab_config + " +start_stop_daemon_args="--interpreted $supervise_daemon_args" +pidfile="/run/gitlab/mail_room.pid" + +required_files="$mailroom_config $gitlab_config" + +depend() { + need redis + use net +} + +start_pre() { + checkpath -d -m 755 -o $command_user -q "${pidfile%/*}" || return 1 + checkpath -f -m 640 -o $command_user "$mailroom_logfile" +} diff --git a/archives/gitlab-foss/gitlab.rails.initd b/archives/gitlab-foss/gitlab.rails.initd new file mode 100644 index 0000000..4c824d9 --- /dev/null +++ b/archives/gitlab-foss/gitlab.rails.initd @@ -0,0 +1,119 @@ +#!/sbin/openrc-run + +name="GitLab Rails" +description="GitLab application" + +extra_started_commands="reload reopen" +description_reload="Reload configuration" +description_reopen="Reopen log files" + +: ${gitlab_base:="/usr/lib/bundles/gitlab"} +: ${metrics_dir:="/dev/shm/gitlab"} + +: ${action_cable_worker_pool_size:=4} +: ${gitlab_config:="/etc/gitlab/gitlab.yml"} +: ${puma_workers:=3} +: ${puma_listen_unix:="/run/gitlab/gitlab.socket"} +: ${puma_listen_tcp:="127.0.0.1:8080"} +: ${puma_stdout_file:="/var/log/gitlab/puma_stdout.log"} +: ${puma_stderr_file:="/var/log/gitlab/puma_stderr.log"} +: ${puma_config:="/etc/gitlab/puma.rb"} +: ${puma_metrics_dir:="$metrics_dir/puma"} + +command="$gitlab_base/bin/puma" +command_args=" + --config $puma_config + --workers $puma_workers + --bind tcp://$puma_listen_tcp + --bind unix://$puma_listen_unix + --redirect-stdout $puma_stdout_file + --redirect-stderr $puma_stderr_file + --redirect-append + --state /run/gitlab/puma.state + " +command_background="yes" +command_user="git" +directory="$gitlab_base" + +supervise_daemon_args=" + --env ACTION_CABLE_WORKER_POOL_SIZE=$action_cable_worker_pool_size + --env RAILS_ENV=production + --env NODE_ENV=production + --env EXECJS_RUNTIME=Disabled + --env GITLAB_BASE=$gitlab_base + --env TZ=:/etc/localtime + --env prometheus_multiproc_dir=$puma_metrics_dir + ${supervise_daemon_args:-} + " +start_stop_daemon_args=" + --interpreted + $supervise_daemon_args + $start_stop_daemon_args + " +pidfile="/run/gitlab/puma.pid" + +required_files="$gitlab_config $puma_config" + +depend() { + need redis + want sshd postgresql docker-registry + use net +} + +start_pre() { + checkpath -d -m 755 -o $command_user -q "${pidfile%/*}" || return 1 + checkpath -d -m 700 -o $command_user -q "$(readlink -f "$gitlab_base"/tmp)" || return 1 + checkpath -d -m 700 -o $command_user -q "$metrics_dir" || return 1 + checkpath -d -m 700 -o $command_user --directory-truncate "$puma_metrics_dir" || return 1 + checkpath -f -m 644 -o $command_user "$puma_stdout_file" || return 1 + checkpath -f -m 644 -o $command_user "$puma_stderr_file" || return 1 + + # Ruby requires sticky bit on TMP directory. 
+	checkpath -d -m 1777 /tmp
+
+	local downloads_path="$(_parse_yaml "$gitlab_config" \
+		production.gitlab.repository_downloads_path)"
+
+	if [ -n "$downloads_path" ]; then
+		checkpath -d -m 700 -o $command_user -q "$downloads_path"
+	fi
+
+	checkpath --directory --owner $command_user --mode 0775 \
+		/var/tmp/gitlab/downloads \
+		/var/tmp/gitlab/backups
+
+}
+
+reload() {
+	ebegin "Reloading $name"
+
+	if [ "$supervisor" ]; then
+		$supervisor "$RC_SVCNAME" --signal USR2
+	else
+		start-stop-daemon --pidfile "$pidfile" --signal USR2
+	fi
+	eend $?
+}
+
+reopen() {
+	ebegin "Telling $name to reopen log files"
+
+	if [ "$supervisor" ]; then
+		$supervisor "$RC_SVCNAME" --signal USR1
+	else
+		start-stop-daemon --pidfile "$pidfile" --signal USR1
+	fi
+	eend $?
+}
+
+_parse_yaml() {
+	local file="$1"
+	local key="$2"
+	local default="${3:-}"
+	local key_path="$(echo "[\"$key\"]" | sed 's/\./"]["/g')"
+
+	ruby <<-EOF
+		require "yaml"
+		puts YAML.load_file("$file")$key_path rescue puts "$default"
+	EOF
+}
diff --git a/archives/gitlab-foss/gitlab.sidekiq.initd b/archives/gitlab-foss/gitlab.sidekiq.initd
new file mode 100644
index 0000000..eb30f4c
--- /dev/null
+++ b/archives/gitlab-foss/gitlab.sidekiq.initd
@@ -0,0 +1,76 @@
+#!/sbin/openrc-run
+
+extra_started_commands="finish"
+
+name="GitLab Sidekiq"
+description="GitLab background workers"
+description_finish="Stop fetching new jobs and finish current ones"
+
+: ${gitlab_base:="/usr/lib/bundles/gitlab"}
+: ${metrics_dir:="/dev/shm/gitlab"}
+
+: ${sidekiq_logfile:="/var/log/gitlab/sidekiq.log"}
+: ${sidekiq_memkiller_max_rss:="2000"}  # default per Omnibus
+: ${sidekiq_metrics_dir:="$metrics_dir/sidekiq"}
+: ${sidekiq_negate:="no"}
+: ${sidekiq_queue_groups:="*"}
+: ${sidekiq_queue_selector:="no"}
+
+command="$gitlab_base/bin/sidekiq-cluster"
+# Note: The rest of the options are set in start_pre().
+command_args="-r $gitlab_base -e production ${command_args:-}"
+command_background="yes"
+command_user="git"
+
+directory="$gitlab_base"
+error_log="$sidekiq_logfile"
+output_log="$sidekiq_logfile"
+
+supervise_daemon_args="
+	--env RAILS_ENV=production
+	--env NODE_ENV=production
+	--env EXECJS_RUNTIME=Disabled
+	--env TZ=:/etc/localtime
+	--env SIDEKIQ_MEMORY_KILLER_MAX_RSS=$(( sidekiq_memkiller_max_rss * 1024 ))
+	--env prometheus_multiproc_dir=$sidekiq_metrics_dir
+	"
+start_stop_daemon_args="--interpreted $supervise_daemon_args"
+pidfile="/run/gitlab/sidekiq.pid"
+
+depend() {
+	need redis
+	use net postgresql
+}
+
+start_pre() {
+	yesno "$sidekiq_queue_selector" && command_args="$command_args --queue-selector"
+
+	command_args="$command_args
+		$(optif --max-concurrency ${sidekiq_max_concurrency:-})
+		$(optif --min-concurrency ${sidekiq_min_concurrency:-})
+		$(optif --interval ${sidekiq_interval:-})
+		$(optif --timeout ${sidekiq_shutdown_timeout:-})
+		$(set -f; printf "'%s' " $sidekiq_queue_groups)
+		"
+	yesno "$sidekiq_negate" && command_args="$command_args --negate"
+
+	checkpath -d -m 755 -o $command_user -q "${pidfile%/*}" || return 1
+	checkpath -d -m 700 -o $command_user -q "$metrics_dir" || return 1
+	checkpath -d -m 700 -o $command_user --directory-truncate "$sidekiq_metrics_dir" || return 1
+	checkpath -f -m 644 -o $command_user "$sidekiq_logfile"
+}
+
+finish() {
+	ebegin "Telling $name to stop fetching new jobs"
+
+	if [ "$supervisor" ]; then
+		$supervisor "$RC_SVCNAME" --signal TSTP
+	else
+		start-stop-daemon --pidfile "$pidfile" --signal TSTP
+	fi
+	eend $?
+}
+
+optif() {
+	test -n "$2" && printf '%s\n' "$1=$2" || true
+}
diff --git a/archives/gitlab-foss/gitlab.workhorse.initd b/archives/gitlab-foss/gitlab.workhorse.initd
new file mode 100644
index 0000000..4b04d7c
--- /dev/null
+++ b/archives/gitlab-foss/gitlab.workhorse.initd
@@ -0,0 +1,75 @@
+#!/sbin/openrc-run
+
+extra_started_commands="reopen"
+
+name="GitLab Workhorse"
+description="A reverse proxy for GitLab."
+description_reopen="Reopen log files"
+
+: ${gitlab_base:="/usr/lib/bundles/gitlab"}
+: ${workhorse_logfile:="/var/log/gitlab/workhorse.log"}
+: ${workhorse_access_log:="no"}
+
+command="/usr/bin/gitlab-workhorse"
+# Note: The rest of the options are set in start_pre().
+command_args="
+	-authBackend=http://${puma_listen_tcp:="127.0.0.1:8080"}
+	-config=${workhorse_config:="/etc/gitlab/workhorse.toml"}
+	-documentRoot=${gitlab_public_dir:="$gitlab_base/public"}
+	-listenAddr=${workhorse_listen:="/run/gitlab/workhorse.socket"}
+	-listenUmask=${workhorse_listen_umask:="000"}
+	-logFile=$workhorse_logfile
+	-secretPath=${workhorse_secret_path:="/etc/gitlab/gitlab_workhorse_secret"}
+	"
+command_background="yes"
+command_user="git"
+directory="$gitlab_base"
+pidfile="/run/gitlab/workhorse.pid"
+
+depend() {
+	use net
+}
+
+start_pre() {
+	local listen_net="tcp"
+	[ "${workhorse_listen:0:1}" = '/' ] && listen_net="unix"
+
+	command_args="$command_args
+		-listenNetwork=$listen_net
+		$(optif -apiCiLongPollingDuration "$workhorse_ci_long_polling_duration")
+		$(optif -apiLimit "$workhorse_api_limit")
+		$(optif -apiQueueDuration "$workhorse_api_queue_duration")
+		$(optif -apiQueueLimit "$workhorse_api_queue_limit")
+		$(optif -authSocket "$puma_listen_unix")
+		$(optif -logFormat "$workhorse_log_format")
+		$(optif -prometheusListenAddr "$workhorse_prometheus_listen")
+		$(optif -proxyHeadersTimeout "$workhorse_proxy_header_timeout")"
+	# FIXME: not implemented
+	#yesno "$workhorse_access_log" || command_args="$command_args -disableAccessLog"
+
+	start_stop_daemon_args="$start_stop_daemon_args
+		$(optif '--env GITLAB_WORKHORSE_SENTRY_DSN' "$workhorse_sentry_dsn")"
+	supervise_daemon_args="$supervise_daemon_args
+		$(optif '--env GITLAB_WORKHORSE_SENTRY_DSN' "$workhorse_sentry_dsn")"
+
+	checkpath -d -m 755 -o $command_user -q "${pidfile%/*}" || return 1
+	if [ "$listen_net" = "unix" ]; then
+		checkpath -d -m 755 -o $command_user -q "${workhorse_listen%/*}" || return 1
+	fi
+	checkpath -f -m 640 -o $command_user "$workhorse_logfile"
+}
+
+reopen() {
+	ebegin "Telling $name to reopen log files"
+
+	if [ "$supervisor" ]; then
+		$supervisor "$RC_SVCNAME" --signal HUP
+	else
+		start-stop-daemon --pidfile "$pidfile" --signal HUP
+	fi
+	eend $?
+}
+
+optif() {
+	test -n "$2" && printf '%s\n' "$1=$2" || true
+}
diff --git a/archives/gitlab-foss/upgrade-sys-filesystem-depend.patch b/archives/gitlab-foss/upgrade-sys-filesystem-depend.patch
new file mode 100644
index 0000000..d608191
--- /dev/null
+++ b/archives/gitlab-foss/upgrade-sys-filesystem-depend.patch
@@ -0,0 +1,35 @@
+diff --git a/Gemfile.orig b/Gemfile
+index c1e9e34..a4448b7 100644
+--- a/Gemfile.orig
++++ b/Gemfile
+@@ -525,7 +525,7 @@ gem 'health_check', '~> 3.0' # rubocop:todo Gemfile/MissingFeatureCategory
+
+ # System information
+ gem 'vmstat', '~> 2.3.0' # rubocop:todo Gemfile/MissingFeatureCategory
+-gem 'sys-filesystem', '~> 1.4.3' # rubocop:todo Gemfile/MissingFeatureCategory
++gem 'sys-filesystem', '~> 1.4.5' # rubocop:todo Gemfile/MissingFeatureCategory
+
+ # NTP client
+ gem 'net-ntp' # rubocop:todo Gemfile/MissingFeatureCategory
+diff --git a/Gemfile.lock.orig b/Gemfile.lock
+index bb66169..a4da10b 100644
+--- a/Gemfile.lock.orig
++++ b/Gemfile.lock
+@@ -1657,7 +1657,7 @@ GEM
+       attr_required (>= 0.0.5)
+       httpclient (>= 2.4)
+     sync (0.5.0)
+-    sys-filesystem (1.4.3)
++    sys-filesystem (1.4.5)
+       ffi (~> 1.1)
+     sysexits (1.2.0)
+     table_print (1.5.7)
+@@ -2123,7 +2123,7 @@ DEPENDENCIES
+   stackprof (~> 0.2.25)
+   state_machines-activerecord (~> 0.8.0)
+   static_holmes (~> 0.7.7)
+-  sys-filesystem (~> 1.4.3)
++  sys-filesystem (~> 1.4.5)
+   tanuki_emoji (~> 0.9)
+   telesignenterprise (~> 2.2)
+   terser (= 1.0.2)

From 9f600f271f48839710652797fb9bf1a42436cf5c Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:27:54 -0400
Subject: [PATCH 04/38] archives/gitlab-pages: new aport

---
 archives/gitlab-pages/APKBUILD             | 35 ++++++++++++++
 archives/gitlab-pages/gitlab-pages.initd   | 55 ++++++++++++++++++++++
 archives/gitlab-pages/ungit-makefile.patch | 18 +++++++
 3 files changed, 108 insertions(+)
 create mode 100644 archives/gitlab-pages/APKBUILD
 create mode 100644 archives/gitlab-pages/gitlab-pages.initd
 create mode 100644 archives/gitlab-pages/ungit-makefile.patch

diff --git a/archives/gitlab-pages/APKBUILD b/archives/gitlab-pages/APKBUILD
new file mode 100644
index 0000000..6ab7745
--- /dev/null
+++ b/archives/gitlab-pages/APKBUILD
@@ -0,0 +1,35 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Antoine Martin (ayakael)
+# Contributor: Jakub Jirutka
+pkgname=gitlab-pages
+pkgver=17.0.4
+_gittag="v$pkgver"
+pkgrel=0
+pkgdesc="A daemon used to serve static websites for GitLab users"
+url="https://gitlab.com/gitlab-org/gitlab-pages/"
+arch="all"
+license="MIT"
+makedepends="go>=1.5"
+source="
+	https://gitlab.com/gitlab-org/gitlab-pages/-/archive/$_gittag/gitlab-pages-$_gittag.tar.gz
+	ungit-makefile.patch
+	$pkgname.initd
+	"
+subpackages="$pkgname-openrc"
+builddir="$srcdir"/$pkgname-$_gittag
+
+build() {
+	make VERSION=$pkgver REVISION=$pkgrel GOPATH="$srcdir" CGO_ENABLED=0
+}
+
+package() {
+	install -D -m 755 $pkgname "$pkgdir"/usr/bin/$pkgname
+	install -m755 -D "$srcdir"/$pkgname.initd \
+		"$pkgdir"/etc/init.d/gitlab.pages
+}
+
+sha512sums="
+fde33d01f7b3810a9a094c09fce19976c41a2ccc9eaf720a0f4dd285eb2d0f35de8d2d607cdbaa670221711919043d681fd3fda6e14d67ae1454619746c1e453  gitlab-pages-v17.0.4.tar.gz
+710a9b652327e57e620c2bdb02bf912a6f61044eaaf61d36c6612284e9b951d2ac6f5eef77dfea16a0cde328bd4c556d9e47791c560139c27cb9659076f809b1  ungit-makefile.patch
+20bc66c1c3548568ed353ca8d584f9108b9688f9375f212a18efc7b8386fdaafb3b2dc9e865f21c7f8fd31ada6e91842a8bb8d397f64851d853bb0de3e0e60bb  gitlab-pages.initd
+"
diff --git a/archives/gitlab-pages/gitlab-pages.initd
b/archives/gitlab-pages/gitlab-pages.initd
new file mode 100644
index 0000000..4a34507
--- /dev/null
+++ b/archives/gitlab-pages/gitlab-pages.initd
@@ -0,0 +1,55 @@
+#!/sbin/openrc-run
+
+name="GitLab Pages"
+description="A daemon used to serve static websites for GitLab users"
+
+: ${pages_user:=${user:-"git"}}
+: ${pages_root:="/var/lib/gitlab/pages"}
+: ${pages_logfile:="/var/log/gitlab/pages.log"}
+
+command="/usr/bin/gitlab-pages"
+# Note: The rest of the options are set in start_pre().
+command_args="
+	-pages-domain=$pages_domain
+	-pages-root=$pages_root
+	-redirect-http=${pages_redirect_http:-true}
+	-use-http2=${pages_use_http2:-true}
+	"
+command_background="yes"
+
+start_stop_daemon_args="
+	--chdir $pages_root
+	--user $pages_user
+	--stdout $pages_logfile
+	--stderr $pages_logfile"
+pidfile="/run/gitlab-pages.pid"
+
+
+depend() {
+	use net
+}
+
+start_pre() {
+	local item
+
+	for item in $pages_listen_http; do
+		command_args="$command_args -listen-http=$item"
+	done
+	for item in $pages_listen_https; do
+		command_args="$command_args -listen-https=$item"
+	done
+	for item in $pages_listen_proxy; do
+		command_args="$command_args -listen-proxy=$item"
+	done
+
+	command_args="$command_args
+		$(optif -metrics-address "$pages_metrics_address")
+		$(optif -root-cert "$pages_root_cert")
+		$(optif -root-key "$pages_root_key")"
+
+	checkpath -m 640 -o $pages_user -f "$pages_logfile"
+}
+
+optif() {
+	test -n "$2" && printf '%s\n' "$1=$2" || true
+}
diff --git a/archives/gitlab-pages/ungit-makefile.patch b/archives/gitlab-pages/ungit-makefile.patch
new file mode 100644
index 0000000..4cbc132
--- /dev/null
+++ b/archives/gitlab-pages/ungit-makefile.patch
@@ -0,0 +1,18 @@
+diff --git a/Makefile.internal.mk.orig b/Makefile.internal.mk
+index 6dfaa1b..207bdaf 100644
+--- a/Makefile.internal.mk.orig
++++ b/Makefile.internal.mk
+@@ -1,13 +1,3 @@
+-REVISION := $(shell git rev-parse --short HEAD || echo unknown)
+-LAST_TAG := $(shell git describe --tags --abbrev=0)
+-COMMITS := $(shell echo `git log --oneline $(LAST_TAG)..HEAD | wc -l`)
+-VERSION := $(shell cat VERSION)
+-BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
+-
+-ifneq (v$(VERSION),$(LAST_TAG))
+-  VERSION := $(shell echo $(VERSION)~beta.$(COMMITS).g$(REVISION))
+-endif
+-
+ VERSION_FLAGS :=-X "main.VERSION=$(VERSION)" -X "main.REVISION=$(REVISION)"
+
+ export GOBIN := $(CURDIR)/bin

From fa80820d8e2da8a84f933fff8213fab43cfc1b37 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:27:57 -0400
Subject: [PATCH 05/38] archives/gitlab-shell: new aport

---
 archives/gitlab-shell/APKBUILD                |  66 +++++++++++
 .../gitlab-shell/change-config-path.patch     |  11 ++
 archives/gitlab-shell/config.patch            | 112 ++++++++++++++++++
 archives/gitlab-shell/gitconfig               |  17 +++
 .../gitlab-shell/gitlab-shell.post-install    |  23 ++++
 .../gitlab-shell/gitlab-shell.pre-install     |  41 +++++++
 6 files changed, 270 insertions(+)
 create mode 100644 archives/gitlab-shell/APKBUILD
 create mode 100644 archives/gitlab-shell/change-config-path.patch
 create mode 100644 archives/gitlab-shell/config.patch
 create mode 100644 archives/gitlab-shell/gitconfig
 create mode 100644 archives/gitlab-shell/gitlab-shell.post-install
 create mode 100644 archives/gitlab-shell/gitlab-shell.pre-install

diff --git a/archives/gitlab-shell/APKBUILD b/archives/gitlab-shell/APKBUILD
new file mode 100644
index 0000000..b1d202a
--- /dev/null
+++ b/archives/gitlab-shell/APKBUILD
@@ -0,0 +1,66 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Antoine Martin (ayakael)
+# Contributor:
Jakub Jirutka
+pkgname=gitlab-shell
+pkgver=14.36.0
+pkgrel=0
+pkgdesc="GitLab Shell handles git SSH sessions for GitLab"
+url="https://gitlab.com/gitlab-org/gitlab-shell"
+arch="all"
+license="MIT"
+depends="git openssh"
+makedepends="go krb5-dev"
+pkgusers="git"
+pkggroups="git"
+install="$pkgname.pre-install $pkgname.post-install"
+# NOTE: user vs system gitconfig, see https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/6166
+source="https://gitlab.com/gitlab-org/gitlab-shell/-/archive/v$pkgver/gitlab-shell-v$pkgver.tar.gz
+	config.patch
+	change-config-path.patch
+	gitconfig
+	"
+builddir="$srcdir/$pkgname-v$pkgver"
+options="!check"
+
+build() {
+	# BUILD_TAGS - build without tracing libs,
+	# see https://gitlab.com/gitlab-org/labkit/-/merge_requests/2
+	make build \
+		VERSION_STRING="$pkgver" \
+		BUILD_TAGS=""
+}
+
+package() {
+	local datadir="$pkgdir/var/lib/gitlab"
+	local libdir="$pkgdir/usr/lib/gitlab-shell"
+
+	# XXX: I couldn't figure out how/where gitlab-shell is called,
+	# so I kept /usr/lib/gitlab-shell. It should be changed to /usr.
+	make install DESTDIR="$pkgdir" PREFIX=/usr/lib/gitlab-shell
+
+	install -m644 VERSION "$libdir"/
+	install -m644 -D config.yml.example "$pkgdir"/etc/gitlab/gitlab-shell.yml
+
+	cd "$pkgdir"
+
+	rm "$libdir"/bin/gitlab-sshd
+
+	install -d -m755 -o git -g git \
+		"$pkgdir"/var/log/gitlab \
+		"$datadir"
+
+	install -d -m02770 -o git -g git \
+		"$datadir"/repositories
+
+	install -m644 -o git -g git "$srcdir"/gitconfig "$datadir"/.gitconfig
+
+	ln -s /etc/gitlab/gitlab-shell.yml "$libdir"/config.yml
+	ln -s /etc/gitlab/gitlab_shell_secret "$libdir"/.gitlab_shell_secret
+}
+
+sha512sums="
+6b302be3630e60e3c9f76e58c61674bf08c3fe1395c9af5f354b9a557ecd1ddb43d27c9a995f868c4e4e2e734dd424a37c73e78d26b00f1f6a78f8670b45c371  gitlab-shell-v14.36.0.tar.gz
+e9dd69c57c65197493f75bdde682075c6ab22892ed07d37c7a73129fb42a8349a676d5986bfd17f1df331645334248383845f21ce08d1e9664c38e4bbf5343ba  config.patch
+499b3a46ea94a33a23b01f6a7509d74f5a6781b930619b3b8ae42bdeae8a052cc636578744d7992b4ae4f9b9f72b11ee3d3c0f5e50986fa3f7e35b979b08aada  change-config-path.patch
+c53da7f145593693392d9fa880ad5a1909bfc7504fd1c93d94a468c3e0f5cc80f712f41ee1dc8bf38105b410c1165658f208bd88a70c4674104c78af33d8d09c  gitconfig
+"
diff --git a/archives/gitlab-shell/change-config-path.patch b/archives/gitlab-shell/change-config-path.patch
new file mode 100644
index 0000000..52d44ce
--- /dev/null
+++ b/archives/gitlab-shell/change-config-path.patch
@@ -0,0 +1,11 @@
+--- a/support/gitlab_config.rb
++++ b/support/gitlab_config.rb
+@@ -4,7 +4,7 @@ class GitlabConfig
+   attr_reader :config
+
+   def initialize
+-    @config = YAML.load_file(File.join(ROOT_PATH, 'config.yml'))
++    @config = YAML.load_file(ENV.fetch('GITLAB_SHELL_CONFIG', '/etc/gitlab/gitlab-shell.yml'))
+   end
+
+   def home
diff --git a/archives/gitlab-shell/config.patch b/archives/gitlab-shell/config.patch
new file mode 100644
index 0000000..6dabe44
--- /dev/null
+++ b/archives/gitlab-shell/config.patch
@@ -0,0 +1,112 @@
+diff --git a/config.yml.example.orig b/config.yml.example
+index fb147c4..98eb0e3 100644
+--- a/config.yml.example.orig
++++ b/config.yml.example
+@@ -13,7 +13,7 @@ user: git
+ # only listen on a Unix domain socket. For Unix domain sockets use
+ # "http+unix://<urlquoted-path-to-socket>", e.g.
+ # "http+unix://%2Fpath%2Fto%2Fsocket" +-gitlab_url: "http+unix://%2Fhome%2Fgit%2Fgitlab%2Ftmp%2Fsockets%2Fgitlab-workhorse.socket" ++gitlab_url: "http+unix://%2Frun%2Fgitlab%2Fworkhorse.socket" + + # When a http+unix:// is used in gitlab_url, this is the relative URL root to GitLab. + # Not used if gitlab_url is http:// or https://. +@@ -29,15 +29,15 @@ http_settings: + # + + # File used as authorized_keys for gitlab user +-auth_file: "/home/git/.ssh/authorized_keys" ++auth_file: "/var/lib/gitlab/.ssh/authorized_keys" + + # SSL certificate dir where custom certificates can be placed + # https://golang.org/pkg/crypto/x509/ +-# ssl_cert_dir: /opt/gitlab/embedded/ssl/certs/ ++# ssl_cert_dir: /etc/gitlab/ssl/certs/ + + # File that contains the secret key for verifying access to GitLab. + # Default is .gitlab_shell_secret in the gitlab-shell directory. +-# secret_file: "/home/git/gitlab-shell/.gitlab_shell_secret" ++secret_file: "/etc/gitlab/gitlab_shell_secret" + # + # The secret field supersedes the secret_file, and if set that + # file will not be read. +@@ -45,13 +45,13 @@ auth_file: "/home/git/.ssh/authorized_keys" + + # Log file. + # Default is gitlab-shell.log in the root directory. +-# log_file: "/home/git/gitlab-shell/gitlab-shell.log" ++log_file: "/var/log/gitlab/gitlab-shell.log" + + # Log level. INFO by default +-log_level: INFO ++log_level: WARN + + # Log format. 'json' by default, can be changed to 'text' if needed +-# log_format: json ++log_format: text + + # Audit usernames. + # Set to true to see real usernames in the logs instead of key ids, which is easier to follow, but +@@ -62,62 +62,6 @@ audit_usernames: false + # For more details, visit https://docs.gitlab.com/ee/development/distributed_tracing.html + # gitlab_tracing: opentracing://driver + +-# This section configures the built-in SSH server. Ignored when running on OpenSSH. +-sshd: +- # Address which the SSH server listens on. Defaults to [::]:22. +- listen: "[::]:22" +- # Set to true if gitlab-sshd is being fronted by a load balancer that implements +- # the PROXY protocol. +- proxy_protocol: false +- # Proxy protocol policy ("use", "require", "reject", "ignore"), "use" is the default value +- # Values: https://github.com/pires/go-proxyproto/blob/195fedcfbfc1be163f3a0d507fac1709e9d81fed/policy.go#L20 +- proxy_policy: "use" +- # Proxy allowed IP addresses. Takes precedent over proxy_policy. Disabled by default. +- # proxy_allowed: +- # - "192.168.0.1" +- # - "192.168.1.0/24" +- # Address which the server listens on HTTP for monitoring/health checks. Defaults to localhost:9122. +- web_listen: "localhost:9122" +- # Maximum number of concurrent sessions allowed on a single SSH connection. Defaults to 10. +- concurrent_sessions_limit: 10 +- # Sets an interval after which server will send keepalive message to a client. Defaults to 15s. +- client_alive_interval: 15 +- # The server waits for this time for the ongoing connections to complete before shutting down. Defaults to 10s. +- grace_period: 10 +- # The server disconnects after this time if the user has not successfully logged in. Defaults to 60s. +- login_grace_time: 60 +- # A short timeout to decide to abort the connection if the protocol header is not seen within it. Defaults to 500ms +- proxy_header_timeout: 500ms +- # The endpoint that returns 200 OK if the server is ready to receive incoming connections; otherwise, it returns 503 Service Unavailable. Defaults to "/start". +- readiness_probe: "/start" +- # The endpoint that returns 200 OK if the server is alive. 
Defaults to "/health".
+-  liveness_probe: "/health"
+-  # Specifies the available message authentication code algorithms that are used for protecting data integrity
+-  macs: [hmac-sha2-256-etm@openssh.com, hmac-sha2-512-etm@openssh.com, hmac-sha2-256, hmac-sha2-512, hmac-sha1]
+-  # Specifies the available Key Exchange algorithms
+-  kex_algorithms: [curve25519-sha256, curve25519-sha256@libssh.org, ecdh-sha2-nistp256, ecdh-sha2-nistp384, ecdh-sha2-nistp521, diffie-hellman-group14-sha256, diffie-hellman-group14-sha1]
+-  # Specified the ciphers allowed
+-  ciphers: [aes128-gcm@openssh.com, chacha20-poly1305@openssh.com, aes256-gcm@openssh.com, aes128-ctr, aes192-ctr,aes256-ctr]
+-  # Specified the available Public Key algorithms
+-  public_key_algorithms: [ssh-rsa, ssh-dss, ecdsa-sha2-nistp256, sk-ecdsa-sha2-nistp256@openssh.com, ecdsa-sha2-nistp384, ecdsa-sha2-nistp521, ssh-ed25519, sk-ssh-ed25519@openssh.com, rsa-sha2-256, rsa-sha2-512]
+-  # SSH host key files.
+-  host_key_files:
+-    - /run/secrets/ssh-hostkeys/ssh_host_rsa_key
+-    - /run/secrets/ssh-hostkeys/ssh_host_ecdsa_key
+-    - /run/secrets/ssh-hostkeys/ssh_host_ed25519_key
+-  host_key_certs:
+-    - /run/secrets/ssh-hostkeys/ssh_host_rsa_key-cert.pub
+-    - /run/secrets/ssh-hostkeys/ssh_host_ecdsa_key-cert.pub
+-    - /run/secrets/ssh-hostkeys/ssh_host_ed25519_key-cert.pub
+-  # GSSAPI-related settings
+-  gssapi:
+-    # Enable the gssapi-with-mic authentication method. Defaults to false.
+-    enabled: false
+-    # Keytab path. Defaults to "", system default (usually /etc/krb5.keytab).
+-    keytab: ""
+-    # The Kerberos service name to be used by sshd. Defaults to "", accepts any service name in keytab file.
+-    service_principal_name: ""
+-
+ lfs:
+   # https://gitlab.com/groups/gitlab-org/-/epics/11872, disabled by default.
+   pure_ssh_protocol: false
diff --git a/archives/gitlab-shell/gitconfig b/archives/gitlab-shell/gitconfig
new file mode 100644
index 0000000..ccf8053
--- /dev/null
+++ b/archives/gitlab-shell/gitconfig
@@ -0,0 +1,17 @@
+# Based on files/gitlab-cookbooks/gitlab/templates/default/gitconfig.erb
+# in omnibus-gitlab.
+
+[user]
+name = GitLab
+email = gitlab@local.host
+
+[core]
+# Needed for the web editor.
+autocrlf = input
+alternateRefsCommand="exit 0 #"
+# This option is unnecessary on journaled file systems and it's not recognized
+# by git >= 2.36.
+# fsyncObjectFiles = true
+
+[gc]
+auto = 0
diff --git a/archives/gitlab-shell/gitlab-shell.post-install b/archives/gitlab-shell/gitlab-shell.post-install
new file mode 100644
index 0000000..01c425c
--- /dev/null
+++ b/archives/gitlab-shell/gitlab-shell.post-install
@@ -0,0 +1,23 @@
+#!/bin/sh
+set -eu
+
+keys_file='/var/lib/gitlab/.ssh/authorized_keys'
+
+if [ ! -f "$keys_file" ]; then
+	keys_dir="$(dirname "$keys_file")"
+	echo "* Initializing authorized_keys file in $keys_dir" 1>&2
+
+	mkdir -m0700 -p "$keys_dir"
+	chown git:git "$keys_dir"
+
+	touch "$keys_file"
+	chmod 0600 "$keys_file"
+	chown git:git "$keys_file"
+fi
+
+cat >&2 <<-EOF
+*
+* GitLab Shell has been initialized. Read /etc/gitlab/gitlab-shell.yml and
+* modify settings as needed.
+*
+EOF
diff --git a/archives/gitlab-shell/gitlab-shell.pre-install b/archives/gitlab-shell/gitlab-shell.pre-install
new file mode 100644
index 0000000..9421862
--- /dev/null
+++ b/archives/gitlab-shell/gitlab-shell.pre-install
@@ -0,0 +1,41 @@
+#!/bin/sh
+# It's very important to set user/group correctly.
+
+git_dir='/var/lib/gitlab'
+
+if ! getent group git >/dev/null; then
+	echo '* Creating group git' >&2
+
+	addgroup -S git
+fi
+
+if ! 
id git 2>/dev/null 1>&2; then + echo '* Creating user git' >&2 + + adduser -DHS -G git -h "$git_dir" -s /bin/sh \ + -g "added by apk for gitlab-shell" git + passwd -u git >/dev/null # unlock +fi + +if ! id -Gn git | grep -Fq redis; then + echo '* Adding user git to group redis' >&2 + + addgroup git redis +fi + +user_home="$(getent passwd git | cut -d: -f6)" + +if [ "$user_home" != "$git_dir" ]; then + cat >&2 <<-EOF + !! + !! User git has home directory in $user_home, but this package and gitlab-ce + !! package assumes $git_dir. Although it's possible to use a different + !! directory, it's really not easy. + !! + !! Please change git's home directory to $git_dir, or adjust settings + !! and move files yourself. Otherwise GitLab will not work! + !! + EOF +fi + +exit 0 From cda65a1f036d583cdde948fcfd4f2d917ba55c21 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:27:59 -0400 Subject: [PATCH 06/38] archives/mastodon: new aport --- archives/mastodon/APKBUILD | 202 +++++++++++++++++++++ archives/mastodon/bin-wrapper.in | 15 ++ archives/mastodon/mastodon.initd | 41 +++++ archives/mastodon/mastodon.logrotate | 11 ++ archives/mastodon/mastodon.post-install | 27 +++ archives/mastodon/mastodon.post-upgrade | 1 + archives/mastodon/mastodon.pre-install | 54 ++++++ archives/mastodon/mastodon.sidekiq.initd | 32 ++++ archives/mastodon/mastodon.streaming.initd | 33 ++++ archives/mastodon/mastodon.web.initd | 29 +++ 10 files changed, 445 insertions(+) create mode 100644 archives/mastodon/APKBUILD create mode 100644 archives/mastodon/bin-wrapper.in create mode 100644 archives/mastodon/mastodon.initd create mode 100644 archives/mastodon/mastodon.logrotate create mode 100644 archives/mastodon/mastodon.post-install create mode 120000 archives/mastodon/mastodon.post-upgrade create mode 100644 archives/mastodon/mastodon.pre-install create mode 100644 archives/mastodon/mastodon.sidekiq.initd create mode 100644 archives/mastodon/mastodon.streaming.initd create mode 100644 archives/mastodon/mastodon.web.initd diff --git a/archives/mastodon/APKBUILD b/archives/mastodon/APKBUILD new file mode 100644 index 0000000..954ff0b --- /dev/null +++ b/archives/mastodon/APKBUILD @@ -0,0 +1,202 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=mastodon +_pkgname=$pkgname +pkgver=4.2.10 +_gittag=v$pkgver +pkgrel=1 +pkgdesc="Self-hosted social media and network server based on ActivityPub and OStatus" +arch="x86_64" +url="https://github.com/mastodon/mastodon" +license="AGPL-3.0-only" +depends=" + $pkgname-assets=$pkgver-r$pkgrel + ffmpeg + file + gcompat + imagemagick + nodejs + npm + protobuf + py3-elasticsearch + redis + ruby3.2 + ruby3.2-bundler + yarn + " +makedepends=" + gnu-libiconv-dev + icu-dev + libffi-dev + libidn-dev + libxml2-dev + libxslt-dev + openssl-dev + postgresql-dev + protobuf-dev + ruby3.2-dev + yaml-dev + zlib-dev + " +install=" + $pkgname.pre-install + $pkgname.post-upgrade + $pkgname.post-install + " +source=" + mastodon-$_gittag.tar.gz::https://github.com/mastodon/mastodon/archive/$_gittag.tar.gz + mastodon.initd + mastodon.web.initd + mastodon.sidekiq.initd + mastodon.streaming.initd + mastodon.logrotate + bin-wrapper.in + " +subpackages="$pkgname-openrc $pkgname-assets::noarch" +options="!check" # No test suite + +_prefix="usr/lib/bundles/$_pkgname" + +export BUNDLE_DEPLOYMENT=true +export BUNDLE_FORCE_RUBY_PLATFORM=true +export BUNDLE_FROZEN=true +export BUNDLE_JOBS=${JOBS:-2} + +prepare() { + default_prepare + + # Allow use of any bundler 
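+	# ('/BUNDLED/,+1d' deletes the "BUNDLED WITH" marker line and the
+	# pinned version line right after it)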
+	sed -i -e '/BUNDLED/,+1d' Gemfile.lock
+
+	# Allow use of higher Node versions
+	sed -i 's/"node": .*"/"node": ">=14.15"/' package.json
+
+	mkdir -p "$srcdir"/gem-cache
+}
+
+build() {
+	local bundle_without='exclude development'
+
+	msg "Installing Ruby gems..."
+	bundle config --local build.nokogiri --use-system-libraries \
+		--with-xml2-include=/usr/include/libxml2 \
+		--with-xslt-include=/usr/include/libxslt
+	bundle config --local build.ffi --enable-system-libffi
+	bundle config --local build.idn --enable-system-libidn
+	bundle config --local path "vendor/bundle"
+	bundle config --local set deployment 'false'
+	bundle config --local set without "$bundle_without"
+
+	bundle install --no-cache -j"$(getconf _NPROCESSORS_ONLN)"
+
+	msg "Installing npm modules..."
+	yarn install --production --frozen-lockfile
+
+	(
+		msg "Compiling assets..."
+		export NODE_ENV=production
+		export RAILS_ENV=production
+		export NODE_OPTIONS="--openssl-legacy-provider"
+
+		OTP_SECRET=precompile_placeholder SECRET_KEY_BASE=precompile_placeholder bundle exec rails assets:precompile
+	)
+
+	msg "Cleaning assets gems..."
+	bundle config --local without "$bundle_without"
+	bundle clean
+
+	# Create executables in bin/*.
+	# See also https://github.com/bundler/bundler/issues/6149.
+	bundle binstubs --force bundler puma sidekiq
+}
+
+package() {
+	local destdir="$pkgdir"/$_prefix
+	local datadir="$pkgdir/var/lib/mastodon"
+	# directory creation
+	install -dm 755 \
+		"$destdir" \
+		"$datadir" \
+		"$pkgdir"/etc/init.d
+
+
+	# Install application files.
+	rmdir "$destdir"
+	cp -a "$builddir" "$destdir"
+
+	install -m755 -t "$destdir"/bin/ \
+		bin/bundle \
+		bin/rails \
+		bin/rake \
+		bin/sidekiq \
+		bin/sidekiqmon \
+		bin/tootctl \
+		bin/puma
+
+	cd "$destdir"/vendor/bundle/ruby/*/
+
+	# Remove tests, documentation and other useless files.
+	find gems/ \( -name 'doc' \
+		-o -name 'spec' \
+		-o -name 'test' \) \
+		-type d -maxdepth 2 -exec rm -fr "{}" +
+	find gems/ \( -name 'README*' \
+		-o -name 'CHANGELOG*' \
+		-o -name 'CONTRIBUT*' \
+		-o -name '*LICENSE*' \
+		-o -name 'Rakefile' \
+		-o -name '.*' \) \
+		-type f -delete
+
+	# Remove assets, they are already compiled.
+	rm -r gems/doorkeeper-*/app/assets
+	rm -r gems/pghero-*/app/assets
+
+	# Remove build logs and cache.
+	rm -rf build_info/ cache/
+	find extensions/ \( -name gem_make.out -o -name mkmf.log \) -delete
+
+	cat > "$datadir"/.profile <<-EOF
+		export RAILS_ENV=production
+		export NODE_ENV=production
+		export EXECJS_RUNTIME=Disabled
+	EOF
+
+	# Install wrapper scripts to /usr/bin.
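+	# Each wrapper is bin-wrapper.in with __COMMAND__ substituted, re-execing
+	# the bundled command as the mastodon user.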
+ local name; for name in rake rails tootctl; do + sed "s/__COMMAND__/$name/g" "$srcdir"/bin-wrapper.in \ + > "$builddir"/mastodon-$name + install -m755 -D "$builddir"/mastodon-$name "$pkgdir"/usr/bin/mastodon-$name + done + + # Put the config file in /etc and link to it + touch "$pkgdir"/etc/mastodon.conf + ln -s /etc/mastodon.conf "$destdir"/.env.production + ln -s /usr/bin/node "$destdir"/node + + for file in $_pkgname $_pkgname.sidekiq $_pkgname.web $_pkgname.streaming; do + install -m755 -D "$srcdir"/$file.initd "$pkgdir"/etc/init.d/$file + done + + # Removing all prebuilt artifacts + rm -R "$destdir"/node_modules/*/prebuilds 2>&1 || true + + install -m644 -D "$srcdir"/$_pkgname.logrotate \ + "$pkgdir"/etc/logrotate.d/$_pkgname +} + +assets() { + depends="" + + amove $_prefix/public/assets +} + +sha512sums=" +1fe5417136bc020a83b83eaccef7f1f46c13fc8318681f12ba556b1b6b03e25ef7b6335c28f4e6722101e97b63020cbd0d3fbacdaf9b3b5a4b73c3cf3e230813 mastodon-v4.2.10.tar.gz +d49fea9451c97ccefe5e35b68e4274aeb427f9d1e910b89c1f6c810489c3bec1ccff72952fdaef95abf944b8aff0da84a52347540d36ff1fba5ccc19e1d935c6 mastodon.initd +eefe12a31268245f802222c0001dac884e03adb0d301e53a1512a3cd204836ca03ad083908cd14d146cf0dce99e3a4366570efd0e40a9a490ccd381d4c63c32f mastodon.web.initd +8fc9249c01693bb02b8d1a6177288d5d3549addde8c03eb35cc7a32dde669171872ebc2b5deb8019dc7a12970098f1af707171fa41129be31b04e1dc1651a777 mastodon.sidekiq.initd +03433a2f58600ca0d58e7c3713df2146ccdfc92033ccfe801dbd38bac39b66d6297f2b5ca02300caa36455b484eab2caa68c912c2f72150203bfa0e106c375fc mastodon.streaming.initd +83b3bae5b6fdb4d0dbc1cbe546c62c0aa77397b97d1a5d5377af032466677de188065b556710c0d96576bbae89cc76800f1ffb8cd718155eb2784da818f27619 mastodon.logrotate +dfd0e43ac6c28387bd4aa57fd98ae41aeb5a098b6deb3e44b89f07818e2470773b025364afee7ef6fd0f664cb86bbbbe8796c9f222f5436c256a787282fbe3e1 bin-wrapper.in +" diff --git a/archives/mastodon/bin-wrapper.in b/archives/mastodon/bin-wrapper.in new file mode 100644 index 0000000..eb1d637 --- /dev/null +++ b/archives/mastodon/bin-wrapper.in @@ -0,0 +1,15 @@ + +#!/bin/sh + +BUNDLE_DIR='/usr/lib/bundles/mastodon' +export RAILS_ENV='production' +export NODE_ENV='production' +export EXECJS_RUNTIME='Disabled' + +cd $BUNDLE_DIR + +if [ "$(id -un)" != 'mastodon' ]; then + exec su mastodon -c '"$0" "$@"' -- bin/__COMMAND__ "$@" +else + exec bin/__COMMAND__ "$@" +fi diff --git a/archives/mastodon/mastodon.initd b/archives/mastodon/mastodon.initd new file mode 100644 index 0000000..1454603 --- /dev/null +++ b/archives/mastodon/mastodon.initd @@ -0,0 +1,41 @@ +#!/sbin/openrc-run + +name="Mastodon" +description="Meta script for starting/stopping all the Mastodon components" + +subservices="mastodon.sidekiq mastodon.streaming mastodon.web" + +depend() { + need redis postgresql + use net +} + +start() { + local ret=0 + + ebegin "Starting all Mastodon components" + local svc; for svc in $subservices; do + service $svc start || ret=1 + done + eend $ret +} + +stop() { + local ret=0 + + ebegin "Stopping all Mastodon components" + local svc; for svc in $subservices; do + service $svc stop || ret=1 + done + eend $ret +} + +status() { + local ret=0 + + local svc; for svc in $subservices; do + echo "$svc:" + service $svc status || ret=1 + done + eend $ret +} diff --git a/archives/mastodon/mastodon.logrotate b/archives/mastodon/mastodon.logrotate new file mode 100644 index 0000000..cbfecfc --- /dev/null +++ b/archives/mastodon/mastodon.logrotate @@ -0,0 +1,11 @@ +/var/log/mastodon/*.log { + compress + copytruncate + delaycompress + 
maxsize 10M + minsize 1M + missingok + sharedscripts + rotate 10 + weekly +} diff --git a/archives/mastodon/mastodon.post-install b/archives/mastodon/mastodon.post-install new file mode 100644 index 0000000..9387d45 --- /dev/null +++ b/archives/mastodon/mastodon.post-install @@ -0,0 +1,27 @@ +#!/bin/sh +set -eu + +if [ "${0##*.}" = 'post-upgrade' ]; then + cat >&2 <<-EOF + * + * To finish Mastodon upgrade run: + * + * mastodon-rails db:migrate + * + EOF +else + cat >&2 <<-EOF + * + * 1. Adjust settings in /etc/mastodon.conf + * + * 2. Create database for Mastodon: + * + * psql -c "CREATE ROLE mastodon PASSWORD 'top-secret' INHERIT LOGIN;" + * psql -c "CREATE DATABASE mastodon OWNER mastodon ENCODING 'UTF-8';" + * psql -d mastodon -c "CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist;" + * psql -c "ALTER DATABASE name OWNER TO new_owner;" + * + * 3. Run "mastodon-rake db:migrate" + * + EOF +fi diff --git a/archives/mastodon/mastodon.post-upgrade b/archives/mastodon/mastodon.post-upgrade new file mode 120000 index 0000000..0fcc8b2 --- /dev/null +++ b/archives/mastodon/mastodon.post-upgrade @@ -0,0 +1 @@ +mastodon.post-install \ No newline at end of file diff --git a/archives/mastodon/mastodon.pre-install b/archives/mastodon/mastodon.pre-install new file mode 100644 index 0000000..c869177 --- /dev/null +++ b/archives/mastodon/mastodon.pre-install @@ -0,0 +1,54 @@ +#!/bin/sh +# It's very important to set user/group correctly. + +mastodon_dir='/var/lib/mastodon' + +if ! getent group mastodon 1>/dev/null; then + echo '* Creating group mastodon' 1>&2 + + addgroup -S mastodon +fi + +if ! id mastodon 2>/dev/null 1>&2; then + echo '* Creating user mastodon' 1>&2 + + adduser -DHS -G mastodon -h "$mastodon_dir" -s /bin/sh \ + -g "added by apk for mastodon" mastodon + passwd -u mastodon 1>/dev/null # unlock +fi + +if ! id -Gn mastodon | grep -Fq redis; then + echo '* Adding user mastodon to group redis' 1>&2 + + addgroup mastodon redis +fi + +if [ "$(id -gn mastodon)" != 'mastodon' ]; then + cat >&2 <<-EOF + !! + !! User mastodon has primary group $(id -gn mastodon). We strongly recommend to change + !! mastodon's primary group to mastodon. + !! + EOF + + # Add it at least as a supplementary group. + adduser mastodon mastodon +fi + +user_home="$(getent passwd mastodon | cut -d: -f6)" + +if [ "$user_home" != "$mastodon_dir" ]; then + cat >&2 <<-EOF + !! + !! User mastodon has home directory in $user_home, but this package assumes + !! $mastodon_dir. Although it's possible to use a different directory, + !! it's really not easy. + !! + !! Please change mastodon's home directory to $mastodon_dir, or adjust settings + !! and move files yourself. Otherwise Mastodon will not work! + !! + EOF +fi + +exit 0 + diff --git a/archives/mastodon/mastodon.sidekiq.initd b/archives/mastodon/mastodon.sidekiq.initd new file mode 100644 index 0000000..98c0377 --- /dev/null +++ b/archives/mastodon/mastodon.sidekiq.initd @@ -0,0 +1,32 @@ +#!/sbin/openrc-run + +name="Mastodon background workers Service" +root="/usr/lib/bundles/mastodon" +pidfile="/run/mastodon-sidekiq.pid" +logfile="/var/log/mastodon/sidekiq.log" + +depend() { + use net + need redis +} + +start() { + ebegin "Starting Mastodon background workers" + + cd $root + + start-stop-daemon --start --background \ + --chdir "${root}" \ + --user="mastodon" \ + --make-pidfile --pidfile="${pidfile}" \ + -1 "${logfile}" -2 "${logfile}" \ + --exec /usr/bin/env -- RAILS_ENV=production DB_POOL=25 MALLOC_ARENA_MAX=2 bundle exec sidekiq -c 25 + eend $? 
+}
+
+stop() {
+	ebegin "Stopping Mastodon background workers"
+	start-stop-daemon --stop \
+		--pidfile=${pidfile}
+	eend $?
+}
diff --git a/archives/mastodon/mastodon.streaming.initd b/archives/mastodon/mastodon.streaming.initd
new file mode 100644
index 0000000..b41adc2
--- /dev/null
+++ b/archives/mastodon/mastodon.streaming.initd
@@ -0,0 +1,33 @@
+#!/sbin/openrc-run
+
+name="Mastodon streaming API service"
+root="/usr/lib/bundles/mastodon"
+pidfile="/run/mastodon-streaming.pid"
+logfile="/var/log/mastodon/streaming.log"
+
+depend() {
+	use net
+}
+
+start() {
+	ebegin "Starting Mastodon streaming API"
+
+	cd $root
+
+	start-stop-daemon --start \
+		--background --quiet \
+		--chdir "${root}" \
+		--user="mastodon" \
+		--make-pidfile --pidfile="${pidfile}" \
+		--stdout "${logfile}" --stderr "${logfile}" \
+		--exec /usr/bin/env -- NODE_ENV=production PORT=4000 /usr/bin/node ./streaming/index.js
+	eend $?
+}
+
+stop() {
+	ebegin "Stopping Mastodon streaming API"
+	start-stop-daemon --stop \
+		--pidfile="${pidfile}"
+	eend $?
+}
+
diff --git a/archives/mastodon/mastodon.web.initd b/archives/mastodon/mastodon.web.initd
new file mode 100644
index 0000000..42eace6
--- /dev/null
+++ b/archives/mastodon/mastodon.web.initd
@@ -0,0 +1,29 @@
+#!/sbin/openrc-run
+
+name="Mastodon Web Service"
+root="/usr/lib/bundles/mastodon"
+pidfile="/run/mastodon-web.pid"
+logfile="/var/log/mastodon/web.log"
+
+depend() {
+	use net
+}
+
+start() {
+	ebegin "Starting Mastodon web workers"
+	cd $root
+	start-stop-daemon --start --background \
+		--chdir "${root}" \
+		--user="mastodon" \
+		--pidfile="${pidfile}" --make-pidfile \
+		--stdout="${logfile}" --stderr="${logfile}" \
+		--exec /usr/bin/env -- RAILS_ENV=production PORT=3000 bundle exec puma -C config/puma.rb
+	eend $?
+}
+
+stop() {
+	ebegin "Stopping Mastodon web workers"
+	start-stop-daemon --stop \
+		--pidfile=${pidfile}
+	eend $?
+} From c434b5145e48e29cab062303ee1ab3af594bf31f Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:02 -0400 Subject: [PATCH 07/38] archives/ruby3.2: new aport --- archives/ruby3.2/APKBUILD | 253 ++++++++++++++++++ .../ruby3.2/dont-install-bundled-gems.patch | 20 ++ archives/ruby3.2/fix-get_main_stack.patch | 68 +++++ archives/ruby3.2/fix-riscv64-build.patch | 38 +++ archives/ruby3.2/ruby3.2.post-upgrade | 17 ++ .../test_insns-lower-recursion-depth.patch | 47 ++++ 6 files changed, 443 insertions(+) create mode 100644 archives/ruby3.2/APKBUILD create mode 100644 archives/ruby3.2/dont-install-bundled-gems.patch create mode 100644 archives/ruby3.2/fix-get_main_stack.patch create mode 100644 archives/ruby3.2/fix-riscv64-build.patch create mode 100644 archives/ruby3.2/ruby3.2.post-upgrade create mode 100644 archives/ruby3.2/test_insns-lower-recursion-depth.patch diff --git a/archives/ruby3.2/APKBUILD b/archives/ruby3.2/APKBUILD new file mode 100644 index 0000000..59e7332 --- /dev/null +++ b/archives/ruby3.2/APKBUILD @@ -0,0 +1,253 @@ +# Contributor: Carlo Landmeter +# Contributor: Jakub Jirutka +# Maintainer: Jakub Jirutka +# +# secfixes: +# 3.1.4-r0: +# - CVE-2023-28755 +# - CVE-2023-28756 +# 3.1.3-r0: +# - CVE-2021-33621 +# 3.1.2-r0: +# - CVE-2022-28738 +# - CVE-2022-28739 +# 3.0.3-r0: +# - CVE-2021-41817 +# - CVE-2021-41816 +# - CVE-2021-41819 +# 2.7.4-r0: +# - CVE-2021-31799 +# - CVE-2021-31810 +# - CVE-2021-32066 +# 2.7.3-r0: +# - CVE-2021-28965 +# - CVE-2021-28966 +# 2.7.2-r0: +# - CVE-2020-25613 +# 2.6.6-r0: +# - CVE-2020-10663 +# - CVE-2020-10933 +# 2.6.5-r0: +# - CVE-2019-16255 +# - CVE-2019-16254 +# - CVE-2019-15845 +# - CVE-2019-16201 +# 2.5.2-r0: +# - CVE-2018-16395 +# - CVE-2018-16396 +# 2.5.1-r0: +# - CVE-2017-17742 +# - CVE-2018-6914 +# - CVE-2018-8777 +# - CVE-2018-8778 +# - CVE-2018-8779 +# - CVE-2018-8780 +# 2.4.2-r0: +# - CVE-2017-0898 +# - CVE-2017-10784 +# - CVE-2017-14033 +# - CVE-2017-14064 +# - CVE-2017-0899 +# - CVE-2017-0900 +# - CVE-2017-0901 +# - CVE-2017-0902 +# 2.4.3-r0: +# - CVE-2017-17405 +# +pkgname=ruby3.2 +# When upgrading, upgrade also each ruby- aport listed in file +# gems/bundled_gems. If some aport is missing or not in the main repo, +# create/move it. +pkgver=3.2.2 +_abiver="${pkgver%.*}.0" +pkgrel=0 +pkgdesc="An object-oriented language for quick and easy programming" +url="https://www.ruby-lang.org/" +arch="all" +license="Ruby AND BSD-2-Clause AND MIT" +depends="ca-certificates" +depends_dev=" + $pkgname=$pkgver-r$pkgrel + $pkgname-rdoc=$pkgver-r$pkgrel + gmp-dev + libucontext-dev + " +makedepends="$depends_dev + autoconf + gdbm-dev + libffi-dev + linux-headers + openssl-dev>3 + readline-dev + yaml-dev + zlib-dev + " +install="$pkgname.post-upgrade" +subpackages="$pkgname-dbg $pkgname-doc $pkgname-dev + $pkgname-rdoc::noarch + $pkgname-libs + $pkgname-full::noarch + " +source="https://cache.ruby-lang.org/pub/ruby/${pkgver%.*}/ruby-$pkgver.tar.gz + test_insns-lower-recursion-depth.patch + fix-get_main_stack.patch + dont-install-bundled-gems.patch + fix-riscv64-build.patch + " +replaces="ruby3.2-gems" +builddir="$srcdir"/ruby-$pkgver + +# For backward compatibility (pre 3.x). 
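+# Each iteration appends e.g. ruby3.2-bigdecimal=$pkgver-r$pkgrel to provides,
+# so the old per-gem subpackage names still resolve to this package.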
+for _i in bigdecimal etc fiddle gdbm io-console irb json; do + provides="$provides ruby3.2-$_i=$pkgver-r$pkgrel" +done + +_gemdir="/usr/lib/ruby/gems/$_abiver" +_rubydir="/usr/lib/ruby/$_abiver" +_chost="${CHOST/-alpine-/-}" + +case "$CARCH" in + x86) _arch="i386";; + *) _arch="$CARCH";; +esac + +prepare() { + default_prepare + autoconf + + # v2.7.1 - Of all the bootstraptest only test_fiber fails on s390x: + # test_fiber.rb bootstraptest.tmp.rb:8: [BUG] vm_call_cfunc: cfp consistency error (0x000003ffb63fefb0, 0x000003ffb42f5f58) + case "$CARCH" in + s390x) rm bootstraptest/test_fiber.rb;; + esac + + local name ver; while read -r name ver _; do + case "$name=$ver" in + [a-z]*=[0-9]*.[0-9]*) + if ! apk add -qs "ruby-$name>=$ver" >/dev/null 2>&1; then + warning "bump package ruby-$name to version $ver" + fi + echo "ruby-$name>=$ver" >> "$srcdir"/.ruby-full.depends + esac + done < "$builddir"/gems/bundled_gems +} + +build() { + # -fomit-frame-pointer makes ruby segfault, see gentoo bug #150413 + # In many places aliasing rules are broken; play it safe + # as it's risky with newer compilers to leave it as it is. + # -O2 - ruby is a language runtime, so performance is crucial. Moreover, + # ruby 3.1.1 fails with Bus Error when compiled with -Os on armhf/armv7. + # This makes ruby-libs 7% bigger (13.4 -> 14.4 MiB). + export CFLAGS="${CFLAGS/-Os/-O2} -fno-omit-frame-pointer -fno-strict-aliasing" + export CPPFLAGS="${CPPFLAGS/-Os/-O2} -fno-omit-frame-pointer -fno-strict-aliasing" + + # Needed for coroutine stuff + export LIBS="-lucontext" + + # ruby saves path to install. we want use $PATH + export INSTALL=install + + # the configure script does not detect isnan/isinf as macros + export ac_cv_func_isnan=yes + export ac_cv_func_isinf=yes + + ./configure \ + --build=$CBUILD \ + --host=$CHOST \ + --prefix=/usr \ + --sysconfdir=/etc \ + --mandir=/usr/share/man \ + --infodir=/usr/share/info \ + --with-sitedir=/usr/local/lib/site_ruby \ + --with-search-path="/usr/lib/site_ruby/\$(ruby_ver)/$_arch-linux" \ + --enable-pthread \ + --disable-rpath \ + --enable-shared \ + --with-mantype=man + make +} + +check() { + # https://bugs.ruby-lang.org/issues/18380 + local disable_tests="-n !/TestAddressResolve#test_socket_getnameinfo_domain_blocking/" + + case "$CARCH" in + x86 | armhf | armv7) + # TestReadline#test_interrupt_in_other_thread fails on 32 bit arches according + # to upstream, but the test is disabled just on Travis, not in test suite. + # https://bugs.ruby-lang.org/issues/18393 + disable_tests="$disable_tests -n !/TestReadline#test_interrupt_in_other_thread/" + ;; + esac + + make test TESTS="$disable_tests" +} + +package() { + make DESTDIR="$pkgdir" SUDO="" install + + install -m 644 -D COPYING "$pkgdir"/usr/share/licenses/$pkgname/COPYING + + cd "$pkgdir" + + # Remove bundled gem bundler; it's provided by a separate aport/package + # ruby-bundler. + rm -rf ./$_rubydir/bundler + rm ./$_rubydir/bundler.rb + rm -rf ./$_gemdir/gems/bundler-* + rm ./$_gemdir/specifications/default/bundler-*.gemspec + rm usr/bin/bundle usr/bin/bundler + + # Remove bundled CA certificates; they are provided by ca-certificates. 
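+	# (rmdir then drops the emptied per-vendor directories; '|| true' keeps
+	# going if any still contain files)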
+ rm ./$_rubydir/rubygems/ssl_certs/*/*.pem + rmdir ./$_rubydir/rubygems/ssl_certs/* || true + + rm -Rf ./$_gemdir/cache/* + + if [ -d usr/local ]; then + local f=$(find usr/local -type f) + if [ -n "$f" ]; then + error "Found files in /usr/local:" + echo "$f" + return 1 + fi + rm -r usr/local + fi +} + +rdoc() { + pkgdesc="Ruby documentation tool" + license="Ruby" + depends="$pkgname" + + amove $_rubydir/rdoc + amove $_gemdir/gems/rdoc-* + amove $_gemdir/specifications/default/rdoc-* + amove usr/bin/ri + amove usr/bin/rdoc +} + +libs() { + pkgdesc="Libraries necessary to run Ruby" + depends="" + + amove usr/lib +} + +full() { + pkgdesc="Ruby with all bundled gems" + # bundler is bundled since Ruby 2.6, so include it in ruby-full despite + # that it's provided by a seprate aport/package. + depends="ruby ruby-rdoc ruby-bundler $(cat "$srcdir"/.ruby-full.depends)" + + mkdir -p "$subpkgdir" +} + +sha512sums=" +bcc68f3f24c1c8987d9c80b57332e5791f25b935ba38daf5addf60dbfe3a05f9dcaf21909681b88e862c67c6ed103150f73259c6e35c564f13a00f432e3c1e46 ruby-3.2.2.tar.gz +16fc1f35aee327d1ecac420b091beaa53c675e0504d5a6932004f17ca68a2c38f57b053b0a3903696f2232c5add160d363e3972a962f7f7bcb52e4e998c7315d test_insns-lower-recursion-depth.patch +42cd45c1db089a1ae57834684479a502e357ddba82ead5fa34e64c13971e7ab7ad2919ddd60a104a817864dd3e2e35bdbedb679210eb41d82cab36a0687e43d4 fix-get_main_stack.patch +a77da5e5eb7d60caf3f1cabb81e09b88dc505ddd746e34efd1908c0096621156d81cc65095b846ba9bdb66028891aefce883a43ddec6b56b5beb4aac5e4ee33f dont-install-bundled-gems.patch +000530316af1fca007fe8cee694b59e2e801674bcc1a2ebea95e67745d4afc0ce66c902fdbc88ee847a4fbf55115b183cd803cbf7c98ef685938efb3e2b7c991 fix-riscv64-build.patch +" diff --git a/archives/ruby3.2/dont-install-bundled-gems.patch b/archives/ruby3.2/dont-install-bundled-gems.patch new file mode 100644 index 0000000..b125fa0 --- /dev/null +++ b/archives/ruby3.2/dont-install-bundled-gems.patch @@ -0,0 +1,20 @@ +Don't install bundled gems - we package them separately. 
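+The hunk wraps the bundled-gems install block in a Ruby =begin/=end pair,
+commenting it out rather than deleting it.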
+ +--- a/tool/rbinstall.rb ++++ b/tool/rbinstall.rb +@@ -990,6 +990,7 @@ + end + end + ++=begin XXX-Patched + install?(:ext, :comm, :gem, :'bundled-gems') do + gem_dir = Gem.default_dir + install_dir = with_destdir(gem_dir) +@@ -1057,6 +1058,7 @@ + puts "skipped bundled gems: #{gems.join(' ')}" + end + end ++=end + + parse_args() + diff --git a/archives/ruby3.2/fix-get_main_stack.patch b/archives/ruby3.2/fix-get_main_stack.patch new file mode 100644 index 0000000..864a314 --- /dev/null +++ b/archives/ruby3.2/fix-get_main_stack.patch @@ -0,0 +1,68 @@ +--- a/thread_pthread.c ++++ b/thread_pthread.c +@@ -858,9 +858,6 @@ + # define MAINSTACKADDR_AVAILABLE 0 + # endif + #endif +-#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack) +-# define get_main_stack(addr, size) get_stack(addr, size) +-#endif + + #ifdef STACKADDR_AVAILABLE + /* +@@ -942,6 +939,55 @@ + return 0; + #undef CHECK_ERR + } ++ ++#if defined(__linux__) && !defined(__GLIBC__) && defined(HAVE_GETRLIMIT) ++ ++#ifndef PAGE_SIZE ++#include ++#define PAGE_SIZE sysconf(_SC_PAGE_SIZE) ++#endif ++ ++static int ++get_main_stack(void **addr, size_t *size) ++{ ++ size_t start, end, limit, prevend = 0; ++ struct rlimit r; ++ FILE *f; ++ char buf[PATH_MAX+80], s[8]; ++ int n; ++ STACK_GROW_DIR_DETECTION; ++ ++ f = fopen("/proc/self/maps", "re"); ++ if (!f) ++ return -1; ++ n = 0; ++ while (fgets(buf, sizeof buf, f)) { ++ n = sscanf(buf, "%zx-%zx %*s %*s %*s %*s %7s", &start, &end, s); ++ if (n >= 2) { ++ if (n == 3 && strcmp(s, "[stack]") == 0) ++ break; ++ prevend = end; ++ } ++ n = 0; ++ } ++ fclose(f); ++ if (n == 0) ++ return -1; ++ ++ limit = 100 << 20; /* 100MB stack limit */ ++ if (getrlimit(RLIMIT_STACK, &r)==0 && r.rlim_cur < limit) ++ limit = r.rlim_cur & -PAGE_SIZE; ++ if (limit > end) limit = end; ++ if (prevend < end - limit) prevend = end - limit; ++ if (start > prevend) start = prevend; ++ *addr = IS_STACK_DIR_UPPER() ? 
(void *)start : (void *)end; ++ *size = end - start; ++ return 0; ++} ++#else ++# define get_main_stack(addr, size) get_stack(addr, size) ++#endif ++ + #endif + + static struct { diff --git a/archives/ruby3.2/fix-riscv64-build.patch b/archives/ruby3.2/fix-riscv64-build.patch new file mode 100644 index 0000000..e81e8b6 --- /dev/null +++ b/archives/ruby3.2/fix-riscv64-build.patch @@ -0,0 +1,38 @@ +Patch-Source: https://lists.openembedded.org/g/openembedded-core/message/161168 +partially extracted to actually apply onto a release tarball + +--- +From dfb22e4d6662bf72879eda806eaa78c7b52b519e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 25 Jan 2022 20:29:14 -0800 +Subject: [PATCH] vm_dump.c: Define REG_S1 and REG_S2 for musl/riscv + +These defines are missing in musl, there is a possible +patch to add them to musl, but we need a full list of +these names for mcontext that can be added once for all + +Upstream-Status: Inappropriate [musl bug] +Signed-off-by: Khem Raj +--- + vm_dump.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/vm_dump.c b/vm_dump.c +index a98f5aa..957b785 100644 +--- a/vm_dump.c ++++ b/vm_dump.c +@@ -39,6 +39,11 @@ + + #define MAX_POSBUF 128 + ++#if defined(__riscv) && !defined(__GLIBC__) ++# define REG_S1 9 ++# define REG_S2 18 ++#endif ++ + #define VM_CFP_CNT(ec, cfp) \ + ((rb_control_frame_t *)((ec)->vm_stack + (ec)->vm_stack_size) - \ + (rb_control_frame_t *)(cfp)) +-- +2.35.0 + diff --git a/archives/ruby3.2/ruby3.2.post-upgrade b/archives/ruby3.2/ruby3.2.post-upgrade new file mode 100644 index 0000000..6cba787 --- /dev/null +++ b/archives/ruby3.2/ruby3.2.post-upgrade @@ -0,0 +1,17 @@ +#!/bin/sh + +ver_new="$1" +ver_old="$2" + +if [ "$(apk version -t "$ver_old" "2.5.0-r0")" = "<" ]; then + cat >&2 <<-EOF + * + * In Ruby 2.5 more parts of the stdlib has been splitted into standalone + * gems, yet still installed with Ruby by default. We have moved some of + * them into separate subpackages. If you don't know which subpackages you + * need, you may install meta-package "ruby-full". + * + EOF +fi + +exit 0 diff --git a/archives/ruby3.2/test_insns-lower-recursion-depth.patch b/archives/ruby3.2/test_insns-lower-recursion-depth.patch new file mode 100644 index 0000000..0069720 --- /dev/null +++ b/archives/ruby3.2/test_insns-lower-recursion-depth.patch @@ -0,0 +1,47 @@ +The patched test is a recursion function. We have lower stack size, +so we hit SystemStackError sooner than on other platforms. + + #361 test_insns.rb:389:in `block in ': + # recursive once + def once n + return %r/#{ + if n == 0 + true + else + once(n-1) # here + end + }/ox + end + x = once(128); x = once(7); x = once(16); + x =~ "true" && $~ + #=> "" (expected "true") once + Stderr output is not empty + bootstraptest.tmp.rb:3:in `once': stack level too deep (SystemStackError) + from bootstraptest.tmp.rb:7:in `block in once' + from bootstraptest.tmp.rb:3:in `once' + from bootstraptest.tmp.rb:7:in `block in once' + from bootstraptest.tmp.rb:3:in `once' + from bootstraptest.tmp.rb:7:in `block in once' + from bootstraptest.tmp.rb:3:in `once' + from bootstraptest.tmp.rb:7:in `block in once' + from bootstraptest.tmp.rb:3:in `once' + ... 125 levels... + from bootstraptest.tmp.rb:3:in `once' + from bootstraptest.tmp.rb:7:in `block in once' + from bootstraptest.tmp.rb:3:in `once' + from bootstraptest.tmp.rb:11:in `
' + Test_insns.rb FAIL 1/187 + FAIL 1/1197 tests failed + Make: *** [uncommon.mk:666: yes-btest-ruby] Error 1 + +--- a/bootstraptest/test_insns.rb ++++ b/bootstraptest/test_insns.rb +@@ -274,7 +274,7 @@ + end + }/ox + end +- x = once(128); x = once(7); x = once(16); ++ x = once(32); x = once(7); x = once(16); + x =~ "true" && $~ + }, + [ 'once', <<-'},', ], # { From cfdd98d12f0608b5fedc8d12d1b4ac0f862d5f49 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:04 -0400 Subject: [PATCH 08/38] archives/ruby3.2-bundler: new aport --- archives/ruby3.2-bundler/APKBUILD | 51 +++++++++++++++++++++++++ archives/ruby3.2-bundler/manpages.patch | 37 ++++++++++++++++++ 2 files changed, 88 insertions(+) create mode 100644 archives/ruby3.2-bundler/APKBUILD create mode 100644 archives/ruby3.2-bundler/manpages.patch diff --git a/archives/ruby3.2-bundler/APKBUILD b/archives/ruby3.2-bundler/APKBUILD new file mode 100644 index 0000000..b21a7d8 --- /dev/null +++ b/archives/ruby3.2-bundler/APKBUILD @@ -0,0 +1,51 @@ +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-bundler +_gemname=bundler +pkgver=2.3.26 +pkgrel=0 +pkgdesc="Manage an application's gem dependencies" +url="https://bundler.io/" +arch="noarch" +license="MIT" +depends="ruby3.2" +makedepends="ruby3.2-rake" +subpackages="$pkgname-doc" +source="https://github.com/rubygems/rubygems/archive/bundler-v$pkgver.tar.gz + manpages.patch + " +builddir="$srcdir/rubygems-bundler-v$pkgver/bundler" +options="!check" # tests require deps not available in main repo + +build() { + rake build_metadata + gem build $_gemname.gemspec +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --bindir "$pkgdir/usr/bin" \ + --ignore-dependencies \ + --no-document \ + --verbose \ + $_gemname + + local n; for n in 1 5; do + mkdir -p "$pkgdir"/usr/share/man/man$n + mv "$gemdir"/gems/$_gemname-$pkgver/lib/bundler/man/*.$n "$pkgdir"/usr/share/man/man$n/ + done + + rm -rf "$gemdir"/cache \ + "$gemdir"/build_info \ + "$gemdir"/doc \ + "$gemdir"/gems/$_gemname-$pkgver/man \ + "$gemdir"/gems/$_gemname-$pkgver/*.md +} + +sha512sums=" +0a02d5130ecb8ca96e1850fc409a55d9f07481bbb8ec9b20554cdc6f3b3d3aada67717ab17dd30835615e4c228f39f895bd9b6f55bc22d4dbd88caef9cc105ba bundler-v2.3.26.tar.gz +77a36e61ed205aeea6114b1039dfbe29fcaf916eeae3f91785aa53b3ac534e004aa257e218534d927f39e3673eebbfb3ef9ee17f04ed81f74117799b88e53cf4 manpages.patch +" diff --git a/archives/ruby3.2-bundler/manpages.patch b/archives/ruby3.2-bundler/manpages.patch new file mode 100644 index 0000000..cc11b02 --- /dev/null +++ b/archives/ruby3.2-bundler/manpages.patch @@ -0,0 +1,37 @@ +From: Jakub Jirutka +Date: Fri, 26 Mar 2021 23:17:29 +0100 +Subject: [PATCH] Fix --help when man pages are moved out + +* Allow to move man pages from the gem's directory to the standard + system location (/usr/share/man) without breaking `bundler --help`. +* Fallback to the bundled ronn pages when the man command is available, + but the bundler man pages are not (i.e. ruby-bundler-doc is not + installed). +* Execute man with '-c' option to print the man page to the terminal + instead of using pager. 
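+* Derive the page names from the shipped *.ronn files instead of absolute
+  paths, so the lookup no longer depends on the man pages living in the gem.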
+ +--- a/lib/bundler/cli.rb ++++ b/lib/bundler/cli.rb +@@ -118,16 +118,17 @@ + end + + man_path = File.expand_path("man", __dir__) +- man_pages = Hash[Dir.glob(File.join(man_path, "**", "*")).grep(/.*\.\d*\Z/).collect do |f| +- [File.basename(f, ".*"), f] ++ man_pages = Hash[Dir.glob(File.join(man_path, "**", "*")).grep(/.*\.\d*\.ronn\Z/).collect do |f| ++ man_name = File.basename(f, ".ronn") ++ [File.basename(man_name, ".*"), man_name] + end] + + if man_pages.include?(command) + man_page = man_pages[command] +- if Bundler.which("man") && man_path !~ %r{^file:/.+!/META-INF/jruby.home/.+} +- Kernel.exec "man #{man_page}" ++ if Bundler.which("man") && Kernel.system("man -w #{command} >/dev/null 2>&1") && man_path !~ %r{^file:/.+!/META-INF/jruby.home/.+} ++ Kernel.exec "man -c #{command}" + else +- puts File.read("#{man_path}/#{File.basename(man_page)}.ronn") ++ puts File.read("#{man_path}/#{man_page}.ronn") + end + elsif command_path = Bundler.which("bundler-#{cli}") + Kernel.exec(command_path, "--help") From 8497be9439e29ca1582b732d371b2e92439853a3 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:07 -0400 Subject: [PATCH 09/38] archives/ruby3.2-minitest: new aport --- archives/ruby3.2-minitest/APKBUILD | 66 +++++++++++++++++++++++++ archives/ruby3.2-minitest/gemspec.patch | 15 ++++++ 2 files changed, 81 insertions(+) create mode 100644 archives/ruby3.2-minitest/APKBUILD create mode 100644 archives/ruby3.2-minitest/gemspec.patch diff --git a/archives/ruby3.2-minitest/APKBUILD b/archives/ruby3.2-minitest/APKBUILD new file mode 100644 index 0000000..a3193fb --- /dev/null +++ b/archives/ruby3.2-minitest/APKBUILD @@ -0,0 +1,66 @@ +# Contributor: Jakub Jirutka +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-minitest +_gemname=minitest +# Keep version in sync with "Bundled gems" (https://stdgems.org) for the +# packaged Ruby version. +pkgver=5.15.0 +pkgrel=1 +pkgdesc="Suite of testing facilities supporting TDD, BDD, mocking, and benchmarking for Ruby" +url="https://github.com/minitest/minitest" +arch="noarch" +license="MIT" +depends="ruby3.2" +makedepends="ruby3.2-rdoc" +subpackages="$pkgname-doc" +source="https://github.com/minitest/minitest/archive/v$pkgver/$_gemname-$pkgver.tar.gz + https://rubygems.org/downloads/$_gemname-$pkgver.gem + " +builddir="$srcdir/$_gemname-$pkgver" + +prepare() { + default_prepare + + # Generate gemspec (there's no gemspec in the source). 
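+	# ('gem specification --ruby' dumps the downloaded .gem's metadata as
+	# Ruby code, which 'gem build' in build() then consumes)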
+ gem specification -l --ruby "$srcdir"/$_gemname-$pkgver.gem \ + > "$builddir"/$_gemname.gemspec +} + +build() { + gem build $_gemname.gemspec +} + +check() { + ruby -Ilib -Itest -e "Dir.glob('./test/**/test_*.rb', &method(:require))" +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + local geminstdir="$gemdir/gems/$_gemname-$pkgver" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --ignore-dependencies \ + --document ri \ + --verbose \ + $_gemname + + # Remove unnessecary files + cd "$gemdir" + rm -rf build_info cache extensions plugins + + cd "$geminstdir" + rm -rf History.* Manifest.* README.* Rakefile test/ +} + +doc() { + pkgdesc="$pkgdesc (ri docs)" + + amove "$(ruby -e 'puts Gem.default_dir')"/doc +} + +sha512sums=" +194d074fa83a87b21f551f86d2bb682bcbac53d5a23d4e0f81fbf570427c5cdfcb27e10618bea69037f9e55bea637ed96e52a10808c586ab4020d788556bda71 minitest-5.15.0.tar.gz +5e97a7aa616966ffc60e10cdc0ba123a7e793f10283ec3b6bf36066177036788cb950ad566fbac49e613b93f08b9846534f463017cde966b4890c3a34a2286be minitest-5.15.0.gem +" diff --git a/archives/ruby3.2-minitest/gemspec.patch b/archives/ruby3.2-minitest/gemspec.patch new file mode 100644 index 0000000..a21a0c5 --- /dev/null +++ b/archives/ruby3.2-minitest/gemspec.patch @@ -0,0 +1,15 @@ +--- a/webrick.gemspec ++++ b/webrick.gemspec +@@ -14,12 +14,6 @@ + + s.require_path = %w{lib} + s.files = [ +- "Gemfile", +- "LICENSE.txt", +- "README.md", +- "Rakefile", +- "bin/console", +- "bin/setup", + "lib/webrick.rb", + "lib/webrick/accesslog.rb", + "lib/webrick/cgi.rb", From f7eea066ee5583e3038987067b4860321ff0fce0 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:09 -0400 Subject: [PATCH 10/38] archives/ruby3.2-power_assert: new aport --- archives/ruby3.2-power_assert/APKBUILD | 62 +++++++++++++++++++++ archives/ruby3.2-power_assert/gemspec.patch | 9 +++ 2 files changed, 71 insertions(+) create mode 100644 archives/ruby3.2-power_assert/APKBUILD create mode 100644 archives/ruby3.2-power_assert/gemspec.patch diff --git a/archives/ruby3.2-power_assert/APKBUILD b/archives/ruby3.2-power_assert/APKBUILD new file mode 100644 index 0000000..24d62b2 --- /dev/null +++ b/archives/ruby3.2-power_assert/APKBUILD @@ -0,0 +1,62 @@ +# Contributor: Jakub Jirutka +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-power_assert +_gemname=power_assert +# Keep version in sync with "Bundled gems" (https://stdgems.org) for the +# packaged Ruby version. +pkgver=2.0.3 +pkgrel=0 +pkgdesc="Debug tool for Ruby that displays intermediate results of a method chain" +url="https://github.com/ruby/power_assert" +arch="noarch" +license="BSD-2-Clause AND Ruby" +depends="ruby3.2" +checkdepends="ruby3.2-pry ruby3.2-rake ruby3.2-test-unit" +makedepends="ruby3.2-rdoc" +subpackages="$pkgname-doc" +source="https://github.com/ruby/power_assert/archive/v$pkgver/$_gemname-$pkgver.tar.gz + gemspec.patch + " +builddir="$srcdir/$_gemname-$pkgver" +# Avoid circular dependency with ruby-test-unit. 
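+# (test-unit itself depends on power_assert, so running the suite here would
+# require the very package being bootstrapped)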
+options="!check" + +prepare() { + default_prepare + sed -i '/require .bundler/d' Rakefile +} + +build() { + gem build $_gemname.gemspec +} + +check() { + rake test +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --ignore-dependencies \ + --document ri \ + --verbose \ + $_gemname + + # Remove unnessecary files + cd "$gemdir" + rm -rf build_info cache extensions plugins +} + +doc() { + pkgdesc="$pkgdesc (ri docs)" + + amove "$(ruby -e 'puts Gem.default_dir')"/doc +} + +sha512sums=" +f5658d18b3b78e7757ddfc1ccdabc011076c009a7343eaad2748ca7aeb4d112bf19c70621cb938e7dcf1582c8bb7c5512017885ea51503b3ed274980b7d7c0b1 power_assert-2.0.3.tar.gz +eb4321b8ce33476e21f0cd6da92f1f2be93e0892f5e6043d6d5f5578160f1793993b10645c0b06b3b2df3e8190a10c83e5325c367001e222d98b290222c2edfe gemspec.patch +" diff --git a/archives/ruby3.2-power_assert/gemspec.patch b/archives/ruby3.2-power_assert/gemspec.patch new file mode 100644 index 0000000..ace46ba --- /dev/null +++ b/archives/ruby3.2-power_assert/gemspec.patch @@ -0,0 +1,9 @@ +--- a/power_assert.gemspec ++++ b/power_assert.gemspec +@@ -15,5 +15 @@ +- s.files = `git ls-files -z`.split("\x0").reject do |f| +- f.match(%r{\A(?:test|spec|features|benchmark|bin)/}) +- end +- s.bindir = 'exe' +- s.executables = s.files.grep(%r{^exe/}) { |f| File.basename(f) } ++ s.files = Dir['lib/**/*.rb'] From d696cbb52567789d5c230786afd23b56f4e71ef8 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:11 -0400 Subject: [PATCH 11/38] archives/ruby3.2-rake: new aport --- archives/ruby3.2-rake/APKBUILD | 58 ++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 archives/ruby3.2-rake/APKBUILD diff --git a/archives/ruby3.2-rake/APKBUILD b/archives/ruby3.2-rake/APKBUILD new file mode 100644 index 0000000..6c34011 --- /dev/null +++ b/archives/ruby3.2-rake/APKBUILD @@ -0,0 +1,58 @@ +# Contributor: Jakub Jirutka +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-rake +_gemname=rake +# Keep version in sync with "Bundled gems" (https://stdgems.org) for the +# packaged Ruby version. 
+pkgver=13.0.6 +pkgrel=1 +pkgdesc="A Ruby task runner, inspired by make" +url="https://github.com/ruby/rake" +arch="noarch" +license="MIT" +depends="ruby3.2" +checkdepends="ruby3.2-minitest" +makedepends="ruby3.2-rdoc" +subpackages="$pkgname-doc" +source="https://github.com/ruby/rake/archive/v$pkgver/$_gemname-$pkgver.tar.gz" +builddir="$srcdir/$_gemname-$pkgver" + +build() { + gem build $_gemname.gemspec +} + +check() { + # FIXME: Fix test_signal_propagation_in_tests + ruby -Ilib -Itest -e "Dir.glob('./test/**/test_*.rb', &method(:require))" -- \ + --exclude=test_signal_propagation_in_tests +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --bindir "$pkgdir/usr/bin" \ + --ignore-dependencies \ + --document ri \ + --verbose \ + $_gemname + + # Remove unnessecary files + cd "$gemdir" + rm -rf build_info cache extensions plugins + + cd gems/rake-* + rm -rf doc ./*.rdoc MIT-LICENSE +} + +doc() { + pkgdesc="$pkgdesc (ri docs)" + + amove "$(ruby -e 'puts Gem.default_dir')"/doc +} + +sha512sums=" +1b438be96d8cedaf70e961b0bbd2217692b0b5537b2e1d5f599158e7da3e300cf6ab0c5f0c52fea89be0beb675effbbf563d24e55c84fb673b4982013355e03c rake-13.0.6.tar.gz +" From b598842f29a1028c36860f0bd24867726d125138 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:14 -0400 Subject: [PATCH 12/38] archives/ruby3.2-test-unit: new aport --- archives/ruby3.2-test-unit/APKBUILD | 54 ++++++++++++++++++++++++ archives/ruby3.2-test-unit/gemspec.patch | 8 ++++ 2 files changed, 62 insertions(+) create mode 100644 archives/ruby3.2-test-unit/APKBUILD create mode 100644 archives/ruby3.2-test-unit/gemspec.patch diff --git a/archives/ruby3.2-test-unit/APKBUILD b/archives/ruby3.2-test-unit/APKBUILD new file mode 100644 index 0000000..6e30887 --- /dev/null +++ b/archives/ruby3.2-test-unit/APKBUILD @@ -0,0 +1,54 @@ +# Contributor: Jakub Jirutka +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-test-unit +_gemname=test-unit +# Keep version in sync with "Bundled gems" (https://stdgems.org) for the +# packaged Ruby version. 
+pkgver=3.5.7 +pkgrel=0 +pkgdesc="An xUnit family unit testing framework for Ruby" +url="https://test-unit.github.io" +arch="noarch" +license="BSD-2-Clause AND Python-2.0 AND Ruby" +depends="ruby3.2 ruby3.2-power_assert" +makedepends="ruby3.2-rdoc" +subpackages="$pkgname-doc" +source="https://github.com/test-unit/test-unit/archive/$pkgver/$_gemname-$pkgver.tar.gz + gemspec.patch + " +builddir="$srcdir/$_gemname-$pkgver" + +build() { + gem build $_gemname.gemspec +} + +check() { + ruby test/run-test.rb +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --ignore-dependencies \ + --document ri \ + --verbose \ + $_gemname + + # Remove unnessecary files + cd "$gemdir" + rm -rf build_info cache extensions plugins +} + +doc() { + pkgdesc="$pkgdesc (ri docs)" + + amove "$(ruby -e 'puts Gem.default_dir')"/doc +} + +sha512sums=" +af678a89590c9305eeac3a4e5c7e99354df5b49157de573ee3ff312dad9f12dbcaef3dfe7ffc256194e39e0438625acdd9ab3e9686d7e2c58b2cf225f7f1f74c test-unit-3.5.7.tar.gz +22f54fcf272856a9455d5a7276896ec329377b106ab47e3d376158eee72cf570f4487dd87606d730d061e7b06e5d7a0ff561cd8d279a64d8af0ac04e0f2dba92 gemspec.patch +" diff --git a/archives/ruby3.2-test-unit/gemspec.patch b/archives/ruby3.2-test-unit/gemspec.patch new file mode 100644 index 0000000..f2beca1 --- /dev/null +++ b/archives/ruby3.2-test-unit/gemspec.patch @@ -0,0 +1,8 @@ +--- a/test-unit.gemspec ++++ b/test-unit.gemspec +@@ -27,4 +27 @@ +- spec.files = ["README.md", "Rakefile"] +- spec.files += ["COPYING", "BSDL", "PSFL"] +- spec.files += Dir.glob("{lib,sample}/**/*.rb") +- spec.files += Dir.glob("doc/text/**/*.*") ++ spec.files += Dir.glob("lib/**/*.rb") From 58be187bd157e478a712fcc60954924345406a4b Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:17 -0400 Subject: [PATCH 13/38] archives/ruby3.2-webrick: new aport --- archives/ruby3.2-webrick/APKBUILD | 58 ++++++++++++++++++++++++++ archives/ruby3.2-webrick/gemspec.patch | 13 ++++++ 2 files changed, 71 insertions(+) create mode 100644 archives/ruby3.2-webrick/APKBUILD create mode 100644 archives/ruby3.2-webrick/gemspec.patch diff --git a/archives/ruby3.2-webrick/APKBUILD b/archives/ruby3.2-webrick/APKBUILD new file mode 100644 index 0000000..a0c0b1e --- /dev/null +++ b/archives/ruby3.2-webrick/APKBUILD @@ -0,0 +1,58 @@ +# Contributor: omni +# Maintainer: Jakub Jirutka +pkgname=ruby3.2-webrick +_gemname=webrick +pkgver=1.8.1 +pkgrel=0 +pkgdesc="HTTP server toolkit for Ruby" +url="https://github.com/ruby/webrick" +arch="noarch" +license="BSD-2-Clause" +depends="ruby3.2" +checkdepends="ruby3.2-rake ruby3.2-test-unit" +makedepends="ruby3.2-rdoc" +subpackages="$pkgname-doc" +source="https://github.com/ruby/webrick/archive/v$pkgver/ruby-webrick-$pkgver.tar.gz + gemspec.patch + " +builddir="$srcdir/$_gemname-$pkgver" + +prepare() { + default_prepare + sed -i '/require .bundler/d' Rakefile +} + +build() { + gem build $_gemname.gemspec +} + +check() { + rake test +} + +package() { + local gemdir="$pkgdir/$(ruby -e 'puts Gem.default_dir')" + + gem install \ + --local \ + --install-dir "$gemdir" \ + --ignore-dependencies \ + --document ri \ + --verbose \ + $_gemname + + # Remove unnessecary files + cd "$gemdir" + rm -rf build_info cache extensions plugins +} + +doc() { + pkgdesc="$pkgdesc (ri docs)" + + amove "$(ruby -e 'puts Gem.default_dir')"/doc +} + +sha512sums=" 
+21cb396887025f85cfe04868e7fa7ef039809ca42a3acadfe1decb4dcd02eeeb3c9163e970324b56a9e0eb6202d971370af56e200c69de2d224c1941f866400c ruby-webrick-1.8.1.tar.gz +5c657602228ba5aef4c272b75bc5d7c42855876811a49a7736bfa72b00d65a2bb550ea76ffcc2bc1e2ef9575796f5981eadd97cc92b1f3bf06c0105b8d166222 gemspec.patch +" diff --git a/archives/ruby3.2-webrick/gemspec.patch b/archives/ruby3.2-webrick/gemspec.patch new file mode 100644 index 0000000..db18f02 --- /dev/null +++ b/archives/ruby3.2-webrick/gemspec.patch @@ -0,0 +1,13 @@ +--- a/webrick.gemspec ++++ b/webrick.gemspec +@@ -14,10 +14,6 @@ + + s.require_path = %w{lib} + s.files = [ +- "Gemfile", +- "LICENSE.txt", +- "README.md", +- "Rakefile", + "lib/webrick.rb", + "lib/webrick/accesslog.rb", + "lib/webrick/cgi.rb", From 84fc5eb42739e10ba1f2e3d0aba2eece8962ac63 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:19 -0400 Subject: [PATCH 14/38] ilot/authentik: new aport --- ilot/authentik/APKBUILD | 258 ++++++++++++++++++ ilot/authentik/authentik-ldap.conf | 3 + ilot/authentik/authentik-ldap.openrc | 24 ++ ilot/authentik/authentik-manage.sh | 11 + ilot/authentik/authentik-worker.openrc | 32 +++ ilot/authentik/authentik.openrc | 30 ++ ilot/authentik/authentik.post-install | 39 +++ ilot/authentik/authentik.post-upgrade | 1 + ilot/authentik/authentik.pre-install | 26 ++ ilot/authentik/fix-ak-bash.patch | 10 + .../root-settings-csrf_trusted_origins.patch | 12 + 11 files changed, 446 insertions(+) create mode 100644 ilot/authentik/APKBUILD create mode 100644 ilot/authentik/authentik-ldap.conf create mode 100644 ilot/authentik/authentik-ldap.openrc create mode 100644 ilot/authentik/authentik-manage.sh create mode 100644 ilot/authentik/authentik-worker.openrc create mode 100644 ilot/authentik/authentik.openrc create mode 100755 ilot/authentik/authentik.post-install create mode 120000 ilot/authentik/authentik.post-upgrade create mode 100644 ilot/authentik/authentik.pre-install create mode 100644 ilot/authentik/fix-ak-bash.patch create mode 100644 ilot/authentik/root-settings-csrf_trusted_origins.patch diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD new file mode 100644 index 0000000..d10a575 --- /dev/null +++ b/ilot/authentik/APKBUILD @@ -0,0 +1,258 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=authentik +pkgver=2024.4.3 +pkgrel=1 +pkgdesc="An open-source Identity Provider focused on flexibility and versatility" +url="https://github.com/goauthentik/authentik" +# s390x: missing py3-celery py3-flower and py3-kombu +# armhf/armv7/x86: out of memory error when building goauthentik +# ppc64le: not supported by Rollup build +arch="aarch64 x86_64" +license="MIT" +depends=" + libcap-setcap + nginx + postgresql + procps + pwgen + py3-aiohttp + py3-aiosignal + py3-amqp + py3-anyio + py3-asgiref + py3-asn1 + py3-asn1crypto + py3-async-timeout + py3-attrs + py3-autobahn + py3-automat + py3-bcrypt + py3-billiard + py3-cachetools + py3-cbor2 + py3-celery + py3-certifi + py3-cffi + py3-channels + py3-channels_redis + py3-charset-normalizer + py3-click + py3-click-didyoumean + py3-click-plugins + py3-click-repl + py3-codespell + py3-colorama + py3-constantly + py3-cparser + py3-cryptography + py3-dacite + py3-daphne + py3-dateutil + py3-deepmerge + py3-defusedxml + py3-deprecated + py3-dnspython + py3-django + py3-django-filter + py3-django-guardian + py3-django-model-utils + py3-django-otp + py3-django-prometheus + py3-django-redis + py3-django-rest-framework~=3.14.0 + 
py3-django-rest-framework-guardian
+	py3-django-storages
+	py3-django-tenants
+	py3-docker-py
+	py3-dotenv
+	py3-dumb-init
+	py3-duo_client
+	py3-drf-spectacular
+	py3-email-validator
+	py3-facebook-sdk
+	py3-fido2
+	py3-flower
+	py3-frozenlist
+	py3-geoip2
+	py3-google-auth
+	py3-gunicorn
+	py3-h11
+	py3-httptools
+	py3-humanize
+	py3-hyperlink
+	py3-idna
+	py3-incremental
+	py3-inflection
+	py3-jsonschema
+	py3-jsonpatch
+	py3-jwt
+	py3-kombu
+	py3-kubernetes
+	py3-ldap3
+	py3-lxml
+	py3-maxminddb
+	py3-msgpack
+	py3-multidict
+	py3-oauthlib
+	py3-opencontainers
+	py3-openssl
+	py3-packaging
+	py3-paramiko
+	py3-parsing
+	py3-prometheus-client
+	py3-prompt_toolkit
+	py3-psycopg
+	py3-psycopg-c
+	py3-pydantic-scim
+	py3-pynacl
+	py3-pyrsistent
+	py3-python-jwt
+	py3-redis
+	py3-requests
+	py3-requests-oauthlib
+	py3-rsa
+	py3-scim2-filter-parser
+	py3-setproctitle
+	py3-sentry-sdk
+	py3-service_identity
+	py3-setuptools
+	py3-six
+	py3-sniffio
+	py3-sqlparse
+	py3-structlog
+	py3-swagger-spec-validator
+	py3-tornado
+	py3-twilio
+	py3-twisted
+	py3-txaio
+	py3-tenant-schemas-celery
+	py3-typing-extensions
+	py3-tz
+	py3-ua-parser
+	py3-uritemplate
+	py3-urllib3-secure-extra
+	py3-uvloop
+	py3-vine
+	py3-watchdog
+	py3-watchfiles
+	py3-wcwidth
+	py3-webauthn
+	py3-websocket-client
+	py3-websockets
+	py3-wrapt
+	py3-wsproto
+	py3-xmlsec
+	py3-yaml
+	py3-yarl
+	py3-zope-interface
+	py3-zxcvbn
+	redis
+	uvicorn
+	"
+makedepends="go npm"
+# checkdepends are scooped up by poetry due to their number
+checkdepends="poetry py3-coverage"
+# tests disabled for now
+options="!check"
+install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install"
+source="
+	$pkgname-$pkgver.tar.gz::https://github.com/goauthentik/authentik/archive/refs/tags/version/$pkgver.tar.gz
+	authentik.openrc
+	authentik-worker.openrc
+	authentik-ldap.openrc
+	authentik-ldap.conf
+	authentik-manage.sh
+	fix-ak-bash.patch
+	root-settings-csrf_trusted_origins.patch
+	"
+builddir="$srcdir/"authentik-version-$pkgver
+subpackages="$pkgname-openrc $pkgname-doc"
+pkgusers="authentik"
+pkggroups="authentik"
+
+export GOPATH=$srcdir/go
+export GOCACHE=$srcdir/go-build
+export GOTMPDIR=$srcdir
+
+build() {
+	msg "Building authentik-ldap"
+	go build -o ldap cmd/ldap/main.go
+	msg "Building authentik-proxy"
+	go build -o proxy cmd/proxy/main.go
+	msg "Building authentik-radius"
+	go build -o radius cmd/radius/main.go
+	msg "Building authentik-server"
+	go build -o server cmd/server/*.go
+
+	msg "Building authentik-web"
+	cd web
+	npm ci --no-audit
+	npm run build
+	cd ..
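+	# website/ holds the bundled documentation; package() installs its build/
+	# output under /usr/share/doc/authentik.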
+ + msg "Building website" + cd website + npm ci --no-audit + npm run build +} + +package() { + msg "Packaging $pkgname" + mkdir -p "$pkgdir"/usr/share/webapps/authentik/web + mkdir -p "$pkgdir"/usr/share/webapps/authentik/website + mkdir -p "$pkgdir"/var/lib/authentik + mkdir -p "$pkgdir"/usr/share/doc + mkdir -p "$pkgdir"/usr/bin + cp -dr "$builddir"/authentik "$pkgdir"/usr/share/webapps/authentik + cp -dr "$builddir"/web/dist "$pkgdir"/usr/share/webapps/authentik/web/dist + cp -dr "$builddir"/web/authentik "$pkgdir"/usr/share/webapps/authentik/web/authentik + cp -dr "$builddir"/website/build "$pkgdir"/usr/share/doc/authentik + cp -dr "$builddir"/tests "$pkgdir"/usr/share/webapps/authentik/tests + cp -dr "$builddir"/lifecycle "$pkgdir"/usr/share/webapps/authentik/lifecycle + cp -dr "$builddir"/locale "$pkgdir"/usr/share/webapps/authentik/locale + cp -dr "$builddir"/blueprints "$pkgdir"/var/lib/authentik/blueprints + install -Dm755 "$builddir"/manage.py "$pkgdir"/usr/share/webapps/authentik/manage.py + install -Dm755 "$builddir"/server "$pkgdir"/usr/share/webapps/authentik/server + ln -s "/etc/authentik/config.yml" "$pkgdir"/usr/share/webapps/authentik/local.env.yml + + install -Dm755 "$builddir"/proxy "$pkgdir"/usr/bin/authentik-proxy + install -Dm755 "$builddir"/ldap "$pkgdir"/usr/bin/authentik-ldap + install -Dm755 "$builddir"/radius "$pkgdir"/usr/bin/authentik-radius + + install -Dm755 "$srcdir"/$pkgname.openrc \ + "$pkgdir"/etc/init.d/$pkgname + install -Dm755 "$srcdir"/$pkgname-worker.openrc \ + "$pkgdir"/etc/init.d/$pkgname-worker + install -Dm755 "$srcdir"/$pkgname-ldap.openrc \ + "$pkgdir"/etc/init.d/$pkgname-ldap + install -Dm640 "$srcdir"/$pkgname-ldap.conf \ + "$pkgdir"/etc/conf.d/$pkgname-ldap + install -Dm640 "$builddir"/authentik/lib/default.yml \ + "$pkgdir"/etc/authentik/config.yml + chown root:www-data "$pkgdir"/etc/authentik/config.yml + + mv "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css "$pkgdir"/etc/authentik/custom.css + ln -s "/etc/authentik/custom.css" "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css + chown root:www-data "$pkgdir"/etc/authentik/custom.css + + sed -i 's|cert_discovery_dir.*|cert_discovery_dir: /var/lib/authentik/certs|' "$pkgdir"/etc/authentik/config.yml + sed -i 's|blueprints_dir.*|blueprints_dir: /var/lib/authentik/blueprints|' "$pkgdir"/etc/authentik/config.yml + sed -i 's|template_dir.*|template_dir: /var/lib/authentik/templates|' "$pkgdir"/etc/authentik/config.yml + printf "\ncsrf:\n trusted_origins: ['auth.example.com']" >> "$pkgdir"/etc/authentik/config.yml + printf "\nsecret_key: '@@SECRET_KEY@@'" >> "$pkgdir"/etc/authentik/config.yml + + # Install wrapper script to /usr/bin. 
+ install -m755 -D "$srcdir"/authentik-manage.sh "$pkgdir"/usr/bin/authentik-manage +} + +sha512sums=" +121ed925d81a5cb2a14fed8ec8b324352e40b1fcbba83573bfdc1d1f66a91d9670cd64d7ef752c8a2df6c34fc3e19e8aec5c6752d33e87b487a462a590212ab0 authentik-2024.4.3.tar.gz +4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1 authentik.openrc +6cb03b9b69df39bb4539fe05c966536314d766b2e9307a92d87070ba5f5b7e7ab70f1b5ee1ab3c0c50c23454f9c5a4caec29e63fdf411bbb7a124ad687569b89 authentik-worker.openrc +351e6920d987861f8bf0d7ab2f942db716a8dbdad1f690ac662a6ef29ac0fd46cf817cf557de08f1c024703503d36bc8b46f0d9eb1ecaeb399dce4c3bb527d17 authentik-ldap.openrc +89ee5f0ffdade1c153f3a56ff75b25a7104aa81d8c7a97802a8f4b0eab34850cee39f874dabe0f3c6da3f71d6a0f938f5e8904169e8cdd34d407c8984adee6b0 authentik-ldap.conf +f1a3cb215b6210fa7d857a452a9f2bc4dc0520e49b9fa7027547cff093d740a7e2548f1bf1f8831f7d5ccb80c8e523ee0c8bafcc4dc42d2788725f2137d21bee authentik-manage.sh +3e47db684a3f353dcecdb7bab8836b9d5198766735d77f676a51d952141a0cf9903fcb92e6306c48d2522d7a1f3028b37247fdc1dc74d4d6e043da7eb4f36d49 fix-ak-bash.patch +5c60e54b6a7829d611af66f5cb8184a002b5ae927efbd024c054a7c176fcb9efcfbe5685279ffcf0390b0f0abb3bb03e02782c6867c2b38d1ad2d508aae83fa0 root-settings-csrf_trusted_origins.patch +" diff --git a/ilot/authentik/authentik-ldap.conf b/ilot/authentik/authentik-ldap.conf new file mode 100644 index 0000000..c31e819 --- /dev/null +++ b/ilot/authentik/authentik-ldap.conf @@ -0,0 +1,3 @@ +AUTHENTIK_HOST=https://example.com +AUTHENTIK_TOKEN=your-authentik-token +AUTHENTIK_INSECURE=true diff --git a/ilot/authentik/authentik-ldap.openrc b/ilot/authentik/authentik-ldap.openrc new file mode 100644 index 0000000..fc033be --- /dev/null +++ b/ilot/authentik/authentik-ldap.openrc @@ -0,0 +1,24 @@ +#!/sbin/openrc-run + +name="$RC_SVCNAME" +cfgfile="/etc/conf.d/$RC_SVCNAME" +pidfile="/run/$RC_SVCNAME.pid" +working_directory="/usr/share/webapps/authentik" +command="/usr/bin/authentik-ldap" +command_user="authentik" +command_group="authentik" +start_stop_daemon_args="" +command_background="yes" +output_log="/var/log/authentik/$RC_SVCNAME.log" +error_log="/var/log/authentik/$RC_SVCNAME.err" + +depend() { + need authentik +} + +start_pre() { + cd "$working_directory" + checkpath --directory --owner $command_user:$command_group --mode 0775 \ + /var/log/authentik + export AUTHENTIK_HOST AUTHENTIK_TOKEN AUTHENTIK_INSECURE AUTHENTIK_DEBUG +} diff --git a/ilot/authentik/authentik-manage.sh b/ilot/authentik/authentik-manage.sh new file mode 100644 index 0000000..ef7357d --- /dev/null +++ b/ilot/authentik/authentik-manage.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +BUNDLE_DIR='/usr/share/webapps/authentik' + +cd $BUNDLE_DIR + +if [ "$(id -un)" != 'authentik' ]; then + exec su authentik -c '"$0" "$@"' -- ./manage.py "$@" +else + exec ./manage.py "$@" +fi diff --git a/ilot/authentik/authentik-worker.openrc b/ilot/authentik/authentik-worker.openrc new file mode 100644 index 0000000..f0fa964 --- /dev/null +++ b/ilot/authentik/authentik-worker.openrc @@ -0,0 +1,32 @@ +#!/sbin/openrc-run + +name="$RC_SVCNAME" +cfgfile="/etc/conf.d/$RC_SVCNAME.conf" +pidfile="/run/$RC_SVCNAME.pid" +working_directory="/usr/share/webapps/authentik" +command="/usr/bin/authentik-manage" +command_args="worker" +command_user="authentik" +command_group="authentik" +start_stop_daemon_args="" +command_background="yes" +output_log="/var/log/authentik/$RC_SVCNAME.log" +error_log="/var/log/authentik/$RC_SVCNAME.err" + +depend() { + 
+	need redis
+	need postgresql
+}
+
+start_pre() {
+	cd "$working_directory"
+	checkpath --directory --owner $command_user:$command_group --mode 0775 \
+		/var/log/authentik \
+		/var/lib/authentik/certs \
+		/var/lib/authentik/blueprints
+}
+
+stop_pre() {
+	ebegin "Killing child processes"
+	kill $(ps -o pid= --ppid $(cat $pidfile)) || true
+}
diff --git a/ilot/authentik/authentik.openrc b/ilot/authentik/authentik.openrc
new file mode 100644
index 0000000..a036393
--- /dev/null
+++ b/ilot/authentik/authentik.openrc
@@ -0,0 +1,30 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+cfgfile="/etc/conf.d/$RC_SVCNAME.conf"
+pidfile="/run/$RC_SVCNAME.pid"
+working_directory="/usr/share/webapps/authentik"
+command="/usr/share/webapps/authentik/server"
+command_user="authentik"
+command_group="authentik"
+start_stop_daemon_args=""
+command_background="yes"
+output_log="/var/log/authentik/$RC_SVCNAME.log"
+error_log="/var/log/authentik/$RC_SVCNAME.err"
+
+depend() {
+	need redis
+	need postgresql
+}
+
+start_pre() {
+	cd "$working_directory"
+	checkpath --directory --owner $command_user:$command_group --mode 0775 \
+		/var/log/authentik \
+		/var/lib/authentik/certs
+}
+
+stop_pre() {
+	ebegin "Killing child processes"
+	kill $(ps -o pid= --ppid $(cat $pidfile)) || true
+}
diff --git a/ilot/authentik/authentik.post-install b/ilot/authentik/authentik.post-install
new file mode 100755
index 0000000..a715d20
--- /dev/null
+++ b/ilot/authentik/authentik.post-install
@@ -0,0 +1,39 @@
+#!/bin/sh
+set -eu
+
+group=authentik
+config_file='/etc/authentik/config.yml'
+
+setcap 'cap_net_bind_service=+ep' /usr/share/webapps/authentik/server
+
+if grep -q '@@SECRET_KEY@@' "$config_file"; then
+	echo "* Generating random secret in $config_file" >&2
+
+	secret_key="$(pwgen -s 50 1)"
+	sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file"
+	chown root:$group "$config_file"
+fi
+
+if [ "${0##*.}" = 'post-upgrade' ]; then
+	cat >&2 <<-EOF
+	*
+	* To finish Authentik upgrade run:
+	*
+	*     authentik-manage migrate
+	*
+	EOF
+else
+	cat >&2 <<-EOF
+	*
+	* 1. Adjust settings in /etc/authentik/config.yml.
+	*
+	* 2. Create database for Authentik:
+	*
+	*      psql -c "CREATE ROLE authentik PASSWORD 'top-secret' INHERIT LOGIN;"
+	*      psql -c "CREATE DATABASE authentik OWNER authentik ENCODING 'UTF-8';"
+	*
+	* 3. Run "authentik-manage migrate"
+	* 4. Set up the admin user at https://<your-domain>/if/flow/initial-setup/
+	*
+	EOF
+fi
diff --git a/ilot/authentik/authentik.post-upgrade b/ilot/authentik/authentik.post-upgrade
new file mode 120000
index 0000000..d310dd8
--- /dev/null
+++ b/ilot/authentik/authentik.post-upgrade
@@ -0,0 +1 @@
+authentik.post-install
\ No newline at end of file
diff --git a/ilot/authentik/authentik.pre-install b/ilot/authentik/authentik.pre-install
new file mode 100644
index 0000000..792f304
--- /dev/null
+++ b/ilot/authentik/authentik.pre-install
@@ -0,0 +1,26 @@
+#!/bin/sh
+# It's very important to set user/group correctly.
+
+authentik_dir='/var/lib/authentik'
+
+if ! getent group authentik 1>/dev/null; then
+	echo '* Creating group authentik' 1>&2
+
+	addgroup -S authentik
+fi
+
+if ! id authentik 2>/dev/null 1>&2; then
+	echo '* Creating user authentik' 1>&2
+
+	adduser -DHS -G authentik -h "$authentik_dir" -s /bin/sh \
+		-g "added by apk for authentik" authentik
+	passwd -u authentik 1>/dev/null # unlock
+fi
+
+if ! id -Gn authentik | grep -Fq redis; then
+	echo '* Adding user authentik to group redis' 1>&2
+
+	addgroup authentik redis
+fi
+
+exit 0
diff --git a/ilot/authentik/fix-ak-bash.patch b/ilot/authentik/fix-ak-bash.patch
new file mode 100644
index 0000000..c6afafb
--- /dev/null
+++ b/ilot/authentik/fix-ak-bash.patch
@@ -0,0 +1,10 @@
+diff --git a/lifecycle/ak.orig b/lifecycle/ak
+index 615bfe9..1646274 100755
+--- a/lifecycle/ak.orig
++++ b/lifecycle/ak
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env -S bash -e
++#!/usr/bin/env bash
+ MODE_FILE="${TMPDIR}/authentik-mode"
+ 
+ function log {
diff --git a/ilot/authentik/root-settings-csrf_trusted_origins.patch b/ilot/authentik/root-settings-csrf_trusted_origins.patch
new file mode 100644
index 0000000..4c235f9
--- /dev/null
+++ b/ilot/authentik/root-settings-csrf_trusted_origins.patch
@@ -0,0 +1,12 @@
+diff --git a/authentik/root/settings.py b/authentik/root/settings.py
+index 15e689b06..8b0c1d744 100644
+--- a/authentik/root/settings.py
++++ b/authentik/root/settings.py
+@@ -33,6 +33,7 @@ AUTH_USER_MODEL = "authentik_core.User"
+ 
+ CSRF_COOKIE_NAME = "authentik_csrf"
+ CSRF_HEADER_NAME = "HTTP_X_AUTHENTIK_CSRF"
++CSRF_TRUSTED_ORIGINS = CONFIG.get("csrf.trusted_origins")
+ LANGUAGE_COOKIE_NAME = "authentik_language"
+ SESSION_COOKIE_NAME = "authentik_session"
+ SESSION_COOKIE_DOMAIN = CONFIG.get("cookie_domain", None)

From 7c7a4486cd1dc93f63e7fc7b8387cecd9526ba1b Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:22 -0400
Subject: [PATCH 15/38] ilot/freescout: new aport

---
 ilot/freescout/APKBUILD                       |  82 +++++++
 ilot/freescout/freescout-manage.sh            |  11 +
 ilot/freescout/freescout.nginx                |  56 +++++
 ilot/freescout/freescout.post-install         |  48 ++++
 ilot/freescout/freescout.post-upgrade         |   1 +
 ilot/freescout/freescout.pre-install          |  25 ++
 .../rename-client-to-membre-fr-en.patch       | 220 ++++++++++++++++++
 7 files changed, 443 insertions(+)
 create mode 100644 ilot/freescout/APKBUILD
 create mode 100644 ilot/freescout/freescout-manage.sh
 create mode 100644 ilot/freescout/freescout.nginx
 create mode 100755 ilot/freescout/freescout.post-install
 create mode 120000 ilot/freescout/freescout.post-upgrade
 create mode 100755 ilot/freescout/freescout.pre-install
 create mode 100644 ilot/freescout/rename-client-to-membre-fr-en.patch

diff --git a/ilot/freescout/APKBUILD b/ilot/freescout/APKBUILD
new file mode 100644
index 0000000..1fd520c
--- /dev/null
+++ b/ilot/freescout/APKBUILD
@@ -0,0 +1,82 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Antoine Martin (ayakael)
+pkgname=freescout
+pkgver=1.8.139
+pkgrel=0
+pkgdesc="Free self-hosted help desk & shared mailbox"
+arch="noarch"
+url="https://freescout.net"
+license="AGPL-3.0-only"
+_php=php83
+_php_mods="-fpm -mbstring -xml -imap -zip -gd -curl -intl -tokenizer -pdo_pgsql -openssl -session -iconv -fileinfo -dom -pcntl"
+depends="$_php ${_php_mods//-/$_php-} nginx postgresql pwgen"
+makedepends="composer pcre"
+install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install"
+source="
+	$pkgname-$pkgver.tar.gz::https://github.com/freescout-helpdesk/freescout/archive/refs/tags/$pkgver.tar.gz
+	freescout.nginx
+	freescout-manage.sh
+	rename-client-to-membre-fr-en.patch
+	"
+pkgusers="freescout"
+pkggroups="freescout"
+
+build() {
+	composer install --ignore-platform-reqs
+}
+
+package() {
+	local logdir="/var/log/$pkgname"
+	local datadir="/var/lib/$pkgname"
+	local wwwdir="/usr/share/webapps/$pkgname"
+	local confdir="/etc/$pkgname"
+
+	# Make directories
+	install -dm 755 \
+		"$pkgdir"/$wwwdir \
"$pkgdir"/$confdir \ + "$pkgdir"/$logdir \ + "$pkgdir"/$datadir + + # Copy and ln operations + cp $builddir/* -R "$pkgdir"/$wwwdir/. + for i in storage/app storage/framework bootstrap/cache \ + public/css/builds public/js/builds public/modules Modules; do + + if [ -d "$pkgdir"$wwwdir/$i ]; then + if [ ! -d "$pkgdir"/$datadir/${i%/*} ]; then + mkdir -p "$pkgdir"/$datadir/${i%/*} + fi + mv "$pkgdir"$wwwdir/$i "$pkgdir"/$datadir/$i + else + mkdir -p "$pkgdir"/$datadir/$i + fi + ln -s $datadir/$i "$pkgdir"/$wwwdir/$i + done + ln -s /etc/freescout/freescout.conf "$pkgdir"/usr/share/webapps/freescout/.env + ln -s $wwwdir/storage/app/public "$pkgdir"/$wwwdir/public/storage + + # log dir + rm -R "$pkgdir"/$wwwdir/storage/logs + ln -s "$logdir" "$pkgdir"/$wwwdir/storage/logs + + # Permission settings + chown -R freescout:www-data "$pkgdir"/$datadir "$pkgdir"/$logdir + + # config files + install -Dm644 "$srcdir"/freescout.nginx \ + "$pkgdir"/etc/nginx/http.d/freescout.conf + install -Dm640 "$builddir"/.env.example \ + "$pkgdir"/etc/freescout/freescout.conf + sed -i 's|APP_KEY.*|APP_KEY=@@SECRET_KEY@@|' "$pkgdir"/etc/freescout/freescout.conf + chown root:www-data "$pkgdir"/etc/freescout/freescout.conf + + # Install wrapper script to /usr/bin. + install -m755 -D "$srcdir"/freescout-manage.sh "$pkgdir"/usr/bin/freescout-manage +} +sha512sums=" +11d81fa670bd67a7db9f5bff3a067a1d1cf3c812a34c805a3fc83edc978ded3accc8334581eca1e73cf0ad95f8e289278add57de096528728e2989135b3057a3 freescout-1.8.139.tar.gz +e4af6c85dc12f694bef2a02e4664e31ed50b2c109914d7ffad5001c2bbd764ef25b17ecaa59ff55ef41bccf17169bf910d1a08888364bdedd0ecc54d310e661f freescout.nginx +7ce9b3ee3a979db44f5e6d7daa69431e04a5281f364ae7be23e5a0a0547f96abc858d2a8010346be2fb99bd2355fb529e7030ed20d54f310249e61ed5db4d0ba freescout-manage.sh +3416da98d71aea5a7093913ea34e783e21ff05dca90bdc5ff3d00c548db5889f6d0ec98441cd65ab9f590be5cd59fdd0d7f1c98b5deef7bb3adbc8db435ec9bf rename-client-to-membre-fr-en.patch +" diff --git a/ilot/freescout/freescout-manage.sh b/ilot/freescout/freescout-manage.sh new file mode 100644 index 0000000..9367807 --- /dev/null +++ b/ilot/freescout/freescout-manage.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +BUNDLE_DIR='/usr/share/webapps/freescout' + +cd $BUNDLE_DIR + +if [ "$(id -un)" != 'freescout' ]; then + exec su freescout -c '"$0" "$@"' -- php artisan "$@" +else + exec php artisan "$@" +fi diff --git a/ilot/freescout/freescout.nginx b/ilot/freescout/freescout.nginx new file mode 100644 index 0000000..15f2161 --- /dev/null +++ b/ilot/freescout/freescout.nginx @@ -0,0 +1,56 @@ +server { + listen 80; + listen [::]:80; + + server_name example.com www.example.com; + + root /usr/share/webapps/freescout/public; + + index index.php index.html index.htm; + + error_log /var/www/html/storage/logs/web-server.log; + + # Max. attachment size. + # It must be also set in PHP.ini via "upload_max_filesize" and "post_max_size" directives. + client_max_body_size 20M; + + location / { + try_files $uri $uri/ /index.php?$query_string; + } + location ~ \.php$ { + fastcgi_split_path_info ^(.+\.php)(/.+)$; + fastcgi_pass unix:/run/php/php8.0-fpm.sock; + fastcgi_index index.php; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; + include fastcgi_params; + } + # Uncomment this location if you want to improve attachments downloading speed. + # Also make sure to set APP_DOWNLOAD_ATTACHMENTS_VIA=nginx in the .env file. 
+    #location ^~ /storage/app/attachment/ {
+    #    internal;
+    #    alias /usr/share/webapps/freescout/storage/app/attachment/;
+    #}
+    location ~* ^/storage/attachment/ {
+        expires 1M;
+        access_log off;
+        try_files $uri $uri/ /index.php?$query_string;
+    }
+    location ~* ^/(?:css|js)/.*\.(?:css|js)$ {
+        expires 2d;
+        access_log off;
+        add_header Cache-Control "public, must-revalidate";
+    }
+    # The list should be in sync with /storage/app/public/uploads/.htaccess and /config/app.php
+    location ~* ^/storage/.*\.((?!(jpg|jpeg|jfif|pjpeg|pjp|apng|bmp|gif|ico|cur|png|tif|tiff|webp|pdf|txt|diff|patch|json|mp3|wav|ogg|wma)).)*$ {
+        add_header Content-disposition "attachment; filename=$2";
+        default_type application/octet-stream;
+    }
+    location ~* ^/(?:css|fonts|img|installer|js|modules|[^\\\]+\..*)$ {
+        expires 1M;
+        access_log off;
+        add_header Cache-Control "public";
+    }
+    location ~ /\. {
+        deny all;
+    }
+}
diff --git a/ilot/freescout/freescout.post-install b/ilot/freescout/freescout.post-install
new file mode 100755
index 0000000..467962b
--- /dev/null
+++ b/ilot/freescout/freescout.post-install
@@ -0,0 +1,48 @@
+#!/bin/sh
+set -eu
+
+group=www-data
+config_file='/etc/freescout/freescout.conf'
+
+if grep -q '@@SECRET_KEY@@' "$config_file"; then
+	echo "* Generating random secret in $config_file" >&2
+
+	secret_key="$(freescout-manage key:generate --show)"
+	sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file"
+fi
+
+if [ "${0##*.}" = 'post-upgrade' ]; then
+	cat >&2 <<-EOF
+	*
+	* To finish Freescout upgrade run:
+	*
+	*     freescout-manage freescout:after-app-update
+	*
+	EOF
+else
+	cat >&2 <<-EOF
+	*
+	* 1. Adjust settings in /etc/freescout/freescout.conf
+	*
+	* 2. Make sure cgi.fix_pathinfo=0 is set in /etc/php83/php.ini
+	*
+	* 3. Create database for Freescout:
+	*
+	*      psql -c "CREATE ROLE freescout PASSWORD 'top-secret' INHERIT LOGIN;"
+	*      psql -c "CREATE DATABASE freescout OWNER freescout ENCODING 'UTF-8';"
+	*
+	* 4. Clear application cache and apply .env file changes:
+	*
+	*      freescout-manage freescout:clear-cache
+	*
+	* 5. Create tables:
+	*
+	*      freescout-manage migrate
+	*
+	* 6. Create an admin user:
+	*
+	*      freescout-manage freescout:create-user
+	*
+	EOF
+fi
+
diff --git a/ilot/freescout/freescout.post-upgrade b/ilot/freescout/freescout.post-upgrade
new file mode 120000
index 0000000..d53f932
--- /dev/null
+++ b/ilot/freescout/freescout.post-upgrade
@@ -0,0 +1 @@
+freescout.post-install
\ No newline at end of file
diff --git a/ilot/freescout/freescout.pre-install b/ilot/freescout/freescout.pre-install
new file mode 100755
index 0000000..6332408
--- /dev/null
+++ b/ilot/freescout/freescout.pre-install
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+freescout_dir='/var/lib/freescout'
+
+if ! getent group freescout 1>/dev/null; then
+	echo '* Creating group freescout' 1>&2
+
+	addgroup -S freescout
+fi
+
+if ! id freescout 2>/dev/null 1>&2; then
+	echo '* Creating user freescout' 1>&2
+
+	adduser -DHS -G freescout -h "$freescout_dir" -s /bin/sh \
+		-g "added by apk for freescout" freescout
+	passwd -u freescout 1>/dev/null # unlock
+fi
+
+if !
id -Gn freescout | grep -Fq www-data; then + echo '* Adding user freescout to group www-data' 1>&2 + + addgroup freescout www-data +fi + +exit 0 diff --git a/ilot/freescout/rename-client-to-membre-fr-en.patch b/ilot/freescout/rename-client-to-membre-fr-en.patch new file mode 100644 index 0000000..097e503 --- /dev/null +++ b/ilot/freescout/rename-client-to-membre-fr-en.patch @@ -0,0 +1,220 @@ +diff --git a/resources/lang/en.json b/resources/lang/en.json +new file mode 100644 +index 00000000..82d26052 +--- /dev/null ++++ b/resources/lang/en.json +@@ -0,0 +1,32 @@ ++{ ++ ":person changed the customer to :customer": ":person changed the member to :customer", ++ ":person changed the customer to :customer in conversation #:conversation_number": ":person changed the member to :customer in conversation #:conversation_number", ++ "Auto reply to customer": "Auto reply to member", ++ "Change Customer": "Change Member", ++ "Change the customer to :customer_email?": "Change the member to :customer_email?", ++ "Create a new customer": "Create a new member", ++ "Customer": "Member", ++ "Customer Name": "Member Name", ++ "Customer Profile": "Member Profile", ++ "Customer changed": "Member changed", ++ "Customer saved successfully.": "Member saved successfully", ++ "Customer viewed :when": "Member viewed :when", ++ "Customers": "Members", ++ "Customers email this address for help (e.g. support@domain.com)": "Members email this address for help (e.g. support@domain.com)", ++ "Email :tag_email_begin:email:tag_email_end has been moved from another customer: :a_begin:customer:a_end.": "Email :tag_email_begin:email:tag_email_end has been moved from another member: :a_begin:customer:a_end.", ++ "Email to customer": "Email to member", ++ "Emails to Customers": "Emails to Members", ++ "Error sending email to customer": "Error sending email to member", ++ "Message not sent to customer": "Message not sent to member", ++ "Name that will appear in the From<\/strong> field when a customer views your email.": "Name that will appear in the From<\/strong> field when a member views your email.", ++ "No customers found": "No members found", ++ "No customers found. Would you like to create one?": "No members found. Would you like to create one?", ++ "Notify :person when a customer replies…": "Notify :person when a member replies…", ++ "Notify me when a customer replies…": "Notify me when a member replies…", ++ "Search for a customer by name or email": "Search for a member by name or email", ++ "Sending emails need to be configured for the mailbox in order to send emails to customers and support agents": "Sending emails need to be configured for the mailbox in order to send emails to members and support agents", ++ "This number is not visible to customers. It is only used to track conversations within :app_name": "This number is not visible to members. It is only used to track conversations within :app_name", ++ "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "This reply will go to the member. 
:%switch_start%Switch to a note:switch_end if you are replying to :user_name.", ++ "This text will be added to the beginning of each email reply sent to a customer.": "This text will be added to the beginning of each email reply sent to a member.", ++ "When a customer emails this mailbox, application can send an auto reply to the customer immediately.Only one auto reply is sent per new conversation.": "When a member emails this mailbox, application can send an auto reply to the member immediately.Only one auto reply is sent per new conversation." ++} +\ No newline at end of file +diff --git a/resources/lang/fr.json.orig b/resources/lang/fr.json +index ff8d9d4..98d158f 100644 +--- a/resources/lang/fr.json.orig ++++ b/resources/lang/fr.json +@@ -26,8 +26,8 @@ + ":person added a note to conversation #:conversation_number": ":person a ajouté une note à la conversation #:conversation_number", + ":person assigned :assignee conversation #:conversation_number": ":person a assigné :assignee à la conversation #:conversation_number", + ":person assigned to :assignee": ":person a assigné :assignee", +- ":person changed the customer to :customer": ":person a changé le client en :customer", +- ":person changed the customer to :customer in conversation #:conversation_number": ":person a changé le client en :customer dans la conversation #:conversation_number", ++ ":person changed the customer to :customer": ":person a changé le membre en :customer", ++ ":person changed the customer to :customer in conversation #:conversation_number": ":person a changé le membre en :customer dans la conversation #:conversation_number", + ":person created a draft": ":person a créé un brouillon", + ":person deleted": ":person supprimée", + ":person edited :creator's draft": ":person a modifié brouillon de :creator", +@@ -112,7 +112,7 @@ + "Auto Reply": "Réponse Automatique", + "Auto Reply status saved": "Statut de réponse automatique enregistré", + "Auto replies don't include your mailbox signature, so be sure to add your contact information if necessary.": "Les réponses automatiques n'incluent pas la signature de votre boîte aux lettres, assurez-vous d'ajouter vos coordonnées si nécessaire.", +- "Auto reply to customer": "Réponse automatique au client", ++ "Auto reply to customer": "Réponse automatique au membre", + "Back": "Retour", + "Back to folder": "Retour au dossier", + "Background Jobs": "Emplois d'arrière-plan", +@@ -123,10 +123,10 @@ + "Cancel": "Annuler", + "Cc": "Cc", + "Change": "Modifier", +- "Change Customer": "Changer de client", ++ "Change Customer": "Changer de membre", + "Change address in mailbox settings": "Modifier l'adresse dans les paramètres de la boîte aux lettres", + "Change default redirect": "Modifier la redirection par défaut", +- "Change the customer to :customer_email?": "Changer le client en :customer_email ?", ++ "Change the customer to :customer_email?": "Changer le membre en :customer_email ?", + "Change your password": "Changer votre mot de passe", + "Chat": "Tchat", + "Check Connection": "Vérifier la connexion", +@@ -182,7 +182,7 @@ + "Create a New User": "Créer un nouvel utilisateur", + "Create a Password": "Créer un mot de passe", + "Create a mailbox": "Créer une boîte de réception", +- "Create a new customer": "Créer un nouveau client", ++ "Create a new customer": "Créer un nouveau membre", + "Create symlink manually": "Créer un lien symbolique manuellement", + "Created At": "Créé à", + "Created by :person": "Créé par :person", +@@ -190,14 +190,14 @@ + "Current Password": "Mot de 
passe actuel", + "Custom From Name": "Nom de l'expéditeur personnalisé", + "Custom Name": "Nom personnalisé", +- "Customer": "Client", +- "Customer Name": "Nom du client", +- "Customer Profile": "Profil client", +- "Customer changed": "Client changé", +- "Customer saved successfully.": "Client enregistré avec succès.", +- "Customer viewed :when": "Client vu :when", +- "Customers": "Clients", +- "Customers email this address for help (e.g. support@domain.com)": "Les clients utilisent cette adresse par e-mail pour obtenir de l'aide (par exemple, support@domain.com)", ++ "Customer": "Membre", ++ "Customer Name": "Nom du membre", ++ "Customer Profile": "Profil membre", ++ "Customer changed": "Membre changé", ++ "Customer saved successfully.": "Membre enregistré avec succès.", ++ "Customer viewed :when": "Membre vu :when", ++ "Customers": "Membres", ++ "Customers email this address for help (e.g. support@domain.com)": "Les membres utilisent cette adresse par e-mail pour obtenir de l'aide (par exemple, support@domain.com)", + "Daily": "Quotidien", + "Dashboard": "Tableau de bord", + "Date": "Date", +@@ -247,15 +247,15 @@ + "Edit User": "Modifier l'utilisateur", + "Edited by :whom :when": "Édité par :whom :when", + "Email": "Email", +- "Email :tag_email_begin:email:tag_email_end has been moved from another customer: :a_begin:customer:a_end.": "Email :tag_email_begin:email:tag_email_end a été déplacé depuis un autre client : :a_begin:customer:a_end.", ++ "Email :tag_email_begin:email:tag_email_end has been moved from another customer: :a_begin:customer:a_end.": "Email :tag_email_begin:email:tag_email_end a été déplacé depuis un autre membre : :a_begin:customer:a_end.", + "Email Address": "Adresse e-mail", + "Email Alerts For Administrators": "Envoyez des alertes par e-mail aux administrateurs", + "Email Header": "En-tête de l'e-mail", + "Email Signature": "Signature e-mail", + "Email Template": "Modèle d'e-mail", + "Email passed for delivery. If you don't receive a test email, check your mail server logs.": "E-mail transmis pour livraison. Si vous ne recevez pas d'e-mail de test, consultez les journaux de votre serveur de messagerie.", +- "Email to customer": "Courriel au client", +- "Emails to Customers": "Emails aux clients", ++ "Email to customer": "Courriel au membre", ++ "Emails to Customers": "Emails aux membres", + "Empty Trash": "Vider la corbeille", + "Empty license key": "Clé de licence vide", + "Enable Auto Reply": "Activer la réponse automatique", +@@ -276,7 +276,7 @@ + "Error occurred. Please try again later.": "Erreur est survenue. Veuillez réessayer plus tard.", + "Error occurred. Please try again or try another :%a_start%update method:%a_end%": "Erreur est survenue. 
Veuillez réessayer ou en essayer une autre :%a_start% méthode de mise à jour:%a_end%", + "Error sending alert": "Erreur lors de l'envoi de l'alerte", +- "Error sending email to customer": "Erreur lors de l'envoi d'un e-mail au client", ++ "Error sending email to customer": "Erreur lors de l'envoi d'un e-mail au membre", + "Error sending email to the user who replied to notification from wrong email": "Erreur lors de l'envoi d'un e-mail à l'utilisateur qui a répondu à la notification d'un mauvais e-mail", + "Error sending email to user": "Erreur lors de l'envoi d'un e-mail à l'utilisateur", + "Error sending invitation email to user": "Erreur lors de l'envoi d'un e-mail d'invitation à l'utilisateur", +@@ -419,7 +419,7 @@ + "Message bounced (:link)": "Message renvoyé (:link)", + "Message cannot be empty": "Le message ne peut pas être vide", + "Message has been already sent. Please discard this draft.": "Le message a déjà été envoyé. Veuillez effacer ce brouillon.", +- "Message not sent to customer": "Message non envoyé au client", ++ "Message not sent to customer": "Message non envoyé au membre", + "Method": "Méthode", + "Migrate DB": "Migrer la base de données", + "Mine": "Mes conversations", +@@ -439,7 +439,7 @@ + "My Apps": "Mes Applications", + "My open conversations": "Mes conversations ouvertes", + "Name": "Nom", +- "Name that will appear in the From<\/strong> field when a customer views your email.": "Nom qui apparaîtra dans le champ De<\/strong> lorsqu'un client consulte votre e-mail.", ++ "Name that will appear in the From<\/strong> field when a customer views your email.": "Nom qui apparaîtra dans le champ De<\/strong> lorsqu'un membre consulte votre e-mail.", + "New Conversation": "Nouvelle conversation", + "New Mailbox": "Nouvelle boîte de réception", + "New Password": "Nouveau mot de passe", +@@ -451,8 +451,8 @@ + "Next active conversation": "Conversation active suivante", + "No": "Non", + "No activations left for this license key": "Il ne reste aucune activation pour cette clé de licence", +- "No customers found": "Aucun client trouvé", +- "No customers found. Would you like to create one?": "Aucun client trouvé. Souhaitez-vous en créer un?", ++ "No customers found": "Aucun membre trouvé", ++ "No customers found. Would you like to create one?": "Aucun membre trouvé. Souhaitez-vous en créer un?", + "No invite was found. Please contact your administrator to have a new invite email sent.": "Aucune invitation trouvée. 
Veuillez contacter votre administrateur pour qu'il envoie une nouvelle invitation par email.",
+     "Non-writable files found": "Fichiers non-inscriptibles trouvés",
+     "None": "Aucun",
+@@ -471,10 +471,10 @@
+     "Notifications": "Notifications",
+     "Notifications saved successfully": "Notifications enregistrées",
+     "Notifications will start showing up here soon": "Les notifications commenceront bientôt à apparaître ici",
+-    "Notify :person when a customer replies…": "Avertir :person lorsqu'un client répond…",
++    "Notify :person when a customer replies…": "Avertir :person lorsqu'un membre répond…",
+     "Notify :person when another :app_name user replies or adds a note…": "Notifier :person quand un autre utilisateur :app_name répond ou ajoute une note…",
+     "Notify :person when…": "Avertir :person lorsque…",
+-    "Notify me when a customer replies…": "M'avertir lorsqu'un client répond…",
++    "Notify me when a customer replies…": "M'avertir lorsqu'un membre répond…",
+     "Notify me when another :app_name user replies or adds a note…": "M'avertir lorsqu'un autre utilisateur :app_name répond ou ajoute une note…",
+     "Notify me when…": "Prévenez-moi quand…",
+     "Number": "Numéro",
+@@ -587,7 +587,7 @@
+     "Search": "Recherche",
+     "Search Conversation by Number": "Rechercher une conversation par identifiant",
+     "Search Users": "Rechercher des utilisateurs",
+-    "Search for a customer by name or email": "Rechercher un client par nom ou par e-mail",
++    "Search for a customer by name or email": "Rechercher un membre par nom ou par e-mail",
+     "See logs": "Voir les journaux",
+     "Select Mailbox": "Sélectionnez une boîte aux lettres",
+     "Selected Users have access to this mailbox:": "Les utilisateurs sélectionnés ont accès à cette boîte aux lettres:",
+@@ -613,7 +613,7 @@
+     "Sending": "Envoi en cours",
+     "Sending Emails": "Sending Emails",
+     "Sending can not be undone": "L'envoie ne peut être annulé",
+-    "Sending emails need to be configured for the mailbox in order to send emails to customers and support agents": "L'envoi d'e-mails doit être configuré pour la boîte aux lettres afin d'envoyer des e-mails aux clients et aux agents de support",
++    "Sending emails need to be configured for the mailbox in order to send emails to customers and support agents": "L'envoi d'e-mails doit être configuré pour la boîte aux lettres afin d'envoyer des e-mails aux membres et aux agents de support",
+     "Sendmail": "Exécutable Sendmail",
+     "Separate each email with a comma.": "Séparez chaque e-mail par une virgule",
+     "Server": "Serveur",
+@@ -670,11 +670,11 @@
+     "This is a test mail sent by :app_name. It means that outgoing email settings of your :mailbox mailbox are fine.": "Il s'agit d'un mail de test envoyé par :app_name. Cela signifie que les paramètres de courrier électronique sortant de votre boîte aux lettres :mailbox sont corrects.",
+     "This is a test system mail sent by :app_name. It means that mail settings are fine.": "Il s'agit d'un e-mail du système de test envoyé par :app_name. Cela signifie que les paramètres de messagerie sont corrects.",
+     "This may take several minutes": "Cela peut prendre plusieurs minutes",
+-    "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les clients. Il est uniquement utilisé pour suivre les conversations dans :app_name",
++    "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les membres. 
Il est uniquement utilisé pour suivre les conversations dans :app_name", + "This password is incorrect.": "Ce mot de passe est incorrect.", +- "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au client. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.", ++ "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au membre. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.", + "This setting gives you control over what page loads after you perform an action (send a reply, add a note, change conversation status or assignee).": "Ce paramètre vous permet de contrôler la page qui se charge après avoir effectué une action (envoyer une réponse, ajouter une note, etc.).", +- "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un client.", ++ "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un membre.", + "Thread is not in a draft state": "Le fil n'est pas à l'état de brouillon", + "Thread not found": "Fil non trouvé", + "Time Format": "Format de l'heure", +@@ -751,7 +751,7 @@ + "Welcome to :company_name!": "Bienvenue chez :company_name !", + "Welcome to :company_name, :first_name!": "Bienvenue chez :company_name, :first_name!", + "Welcome to the team!": "Bienvenue dans l'équipe !", +- "When a customer emails this mailbox, application can send an auto reply to the customer immediately.Only one auto reply is sent per new conversation.": "Lorsqu'un client envoie un e-mail à cette boîte aux lettres, l'application peut envoyer immédiatement une réponse automatique au client. Une seule réponse automatique est envoyée par nouvelle conversation.", ++ "When a customer emails this mailbox, application can send an auto reply to the customer immediately.Only one auto reply is sent per new conversation.": "Lorsqu'un membre envoie un e-mail à cette boîte aux lettres, l'application peut envoyer immédiatement une réponse automatique au membre. 
Une seule réponse automatique est envoyée par nouvelle conversation.",
+     "Which mailboxes will user use?": "Quelles boîtes aux lettres l'utilisateur utilisera-t-il?",
+     "Who Else Will Use This Mailbox": "Qui d'autre utilisera cette boîte aux lettres",
+     "Work": "Professionnel",

From cbd0cc098aa3f87bd9aa224fec9a019415726177 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:24 -0400
Subject: [PATCH 16/38] ilot/listmonk: new aport

---
 ilot/listmonk/APKBUILD              | 71 +++++++++++++++++++++++++++++
 ilot/listmonk/listmonk.openrc       | 29 ++++++++++++
 ilot/listmonk/listmonk.post-install | 27 +++++++++++
 ilot/listmonk/listmonk.post-upgrade |  1 +
 ilot/listmonk/listmonk.pre-install  | 22 +++++++++
 ilot/listmonk/listmonk.sh           | 11 +++++
 6 files changed, 161 insertions(+)
 create mode 100644 ilot/listmonk/APKBUILD
 create mode 100644 ilot/listmonk/listmonk.openrc
 create mode 100644 ilot/listmonk/listmonk.post-install
 create mode 120000 ilot/listmonk/listmonk.post-upgrade
 create mode 100644 ilot/listmonk/listmonk.pre-install
 create mode 100644 ilot/listmonk/listmonk.sh

diff --git a/ilot/listmonk/APKBUILD b/ilot/listmonk/APKBUILD
new file mode 100644
index 0000000..00951f0
--- /dev/null
+++ b/ilot/listmonk/APKBUILD
@@ -0,0 +1,71 @@
+# Contributor: Antoine Martin (ayakael)
+# Maintainer: Antoine Martin (ayakael)
+pkgname=listmonk
+pkgver=3.0.0
+pkgrel=0
+pkgdesc='Self-hosted newsletter and mailing list manager with a modern dashboard'
+arch="all"
+url="https://listmonk.app"
+license="AGPL-3.0-only"
+depends="
+	libcap-setcap
+	postgresql
+	procps
+	"
+makedepends="go npm nodejs yarn"
+source="
+	$pkgname-$pkgver.tar.gz::https://github.com/knadh/listmonk/archive/v$pkgver.tar.gz
+	listmonk.sh
+	listmonk.openrc
+	"
+install="$pkgname.pre-install $pkgname.post-install $pkgname.post-upgrade"
+subpackages="$pkgname-openrc"
+pkgusers="listmonk"
+pkggroups="listmonk"
+
+build() {
+	go build \
+		-trimpath \
+		-buildmode=pie \
+		-mod=readonly \
+		-modcacherw \
+		-ldflags "-extldflags '$LDFLAGS' -X 'main.buildString=Alpine Linux v$pkgver-$pkgrel' -X 'main.versionString=v$pkgver'" \
+		-o $pkgname \
+		cmd/*.go
+
+	(
+		cd frontend
+		export YARN_CACHE_FOLDER="$srcdir/node_modules"
+		export VUE_APP_VERSION="v$pkgver"
+		yarn install --frozen-lockfile
+		yarn build
+	)
+}
+
+check() {
+	go test ./...
+}
+
+package() {
+	install -Dm755 "$srcdir"/listmonk.sh "$pkgdir"/usr/bin/listmonk
+	install -Dm644 config.toml.sample "$pkgdir"/etc/listmonk/config.toml
+	install -Dm644 -t "$pkgdir"/usr/share/webapps/listmonk/ \
+		schema.sql \
+		queries.sql \
+		config.toml.sample
+	install -Dm755 listmonk "$pkgdir"/usr/share/webapps/listmonk/listmonk
+	install -Dm644 -t "$pkgdir"/usr/share/webapps/listmonk/frontend/dist/ \
+		frontend/dist/static/favicon.png
+	cp -a frontend/dist/static "$pkgdir"/usr/share/webapps/listmonk/frontend/dist/static
+	cp -a frontend/dist/index.html "$pkgdir"/usr/share/webapps/listmonk/frontend/dist/index.html
+	cp -a static "$pkgdir"/usr/share/webapps/listmonk/
+	cp -a i18n "$pkgdir"/usr/share/webapps/listmonk/
+	install -Dm755 "$srcdir"/$pkgname.openrc \
+		"$pkgdir"/etc/init.d/$pkgname
+	ln -s /etc/listmonk/config.toml "$pkgdir"/usr/share/webapps/listmonk/config.toml
+}
+sha512sums="
+afd0ea1d4d2b2753c3043526590cf09c45a541a2d818f5d1581644ffd10818326fd553a3b04bca59494860a7bb6e96364b08afd33d337a9fc5c71bedd1a5ee6c listmonk-3.0.0.tar.gz
+939450af4b23708e3d23a5a88fad4c24b957090bdd21351a6dd520959e52e45e5fcac117a3eafa280d9506616dae39ad3943589571f008cac5abe1ffd8062424 listmonk.sh
+8e9c0b1f335c295fb741418246eb17c7566e5e4200a284c6483433e8ddbf5250aa692435211cf062ad1dfcdce3fae9148def28f03f2492d33fe5e66cbeebd4bd listmonk.openrc
+"
diff --git a/ilot/listmonk/listmonk.openrc b/ilot/listmonk/listmonk.openrc
new file mode 100644
index 0000000..e2ccb5b
--- /dev/null
+++ b/ilot/listmonk/listmonk.openrc
@@ -0,0 +1,29 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+cfgfile="/etc/conf.d/$RC_SVCNAME.conf"
+pidfile="/run/$RC_SVCNAME.pid"
+working_directory="/usr/share/webapps/listmonk"
+command="/usr/share/webapps/listmonk/listmonk"
+command_user="listmonk"
+command_group="listmonk"
+start_stop_daemon_args=""
+command_background="yes"
+output_log="/var/log/listmonk/$RC_SVCNAME.log"
+error_log="/var/log/listmonk/$RC_SVCNAME.err"
+
+depend() {
+	need postgresql
+}
+
+start_pre() {
+	cd "$working_directory"
+	checkpath --directory --owner $command_user:$command_group --mode 0775 \
+		/var/log/listmonk \
+		/var/lib/listmonk
+}
+
+stop_pre() {
+	ebegin "Killing child processes"
+	kill $(ps -o pid= --ppid $(cat $pidfile)) || true
+}
diff --git a/ilot/listmonk/listmonk.post-install b/ilot/listmonk/listmonk.post-install
new file mode 100644
index 0000000..fe3cc8d
--- /dev/null
+++ b/ilot/listmonk/listmonk.post-install
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -eu
+
+setcap 'cap_net_bind_service=+ep' /usr/share/webapps/listmonk/listmonk
+
+if [ "${0##*.}" = 'post-upgrade' ]; then
+	cat >&2 <<-EOF
+	*
+	* To finish Listmonk upgrade run:
+	*
+	*     listmonk --upgrade
+	*
+	EOF
+else
+	cat >&2 <<-EOF
+	*
+	* 1. Adjust settings in /etc/listmonk/config.toml.
+	*
+	* 2. Create database for Listmonk:
+	*
+	*      psql -c "CREATE ROLE listmonk PASSWORD 'top-secret' INHERIT LOGIN;"
+	*      psql -c "CREATE DATABASE listmonk OWNER listmonk ENCODING 'UTF-8';"
+	*
+	* 3. Run "listmonk --install"
+	*
+	EOF
+fi
diff --git a/ilot/listmonk/listmonk.post-upgrade b/ilot/listmonk/listmonk.post-upgrade
new file mode 120000
index 0000000..0b729b1
--- /dev/null
+++ b/ilot/listmonk/listmonk.post-upgrade
@@ -0,0 +1 @@
+listmonk.post-install
\ No newline at end of file
diff --git a/ilot/listmonk/listmonk.pre-install b/ilot/listmonk/listmonk.pre-install
new file mode 100644
index 0000000..71eb3a0
--- /dev/null
+++ b/ilot/listmonk/listmonk.pre-install
@@ -0,0 +1,22 @@
+#!/bin/sh
+# It's very important to set user/group correctly.
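+# The OpenRC service (listmonk.openrc) starts the daemon as this unprivileged user.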
+
+listmonk_dir='/var/lib/listmonk'
+
+if ! getent group listmonk 1>/dev/null; then
+	echo '* Creating group listmonk' 1>&2
+
+	addgroup -S listmonk
+fi
+
+if ! id listmonk 2>/dev/null 1>&2; then
+	echo '* Creating user listmonk' 1>&2
+
+	adduser -DHS -G listmonk -h "$listmonk_dir" -s /bin/sh \
+		-g "added by apk for listmonk" listmonk
+	passwd -u listmonk 1>/dev/null # unlock
+fi
+
+
+exit 0
diff --git a/ilot/listmonk/listmonk.sh b/ilot/listmonk/listmonk.sh
new file mode 100644
index 0000000..d89ca52
--- /dev/null
+++ b/ilot/listmonk/listmonk.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+BUNDLE_DIR='/usr/share/webapps/listmonk'
+
+cd $BUNDLE_DIR
+
+if [ "$(id -un)" != 'listmonk' ]; then
+	exec su listmonk -c '"$0" "$@"' -- ./listmonk "$@"
+else
+	exec ./listmonk "$@"
+fi

From 9306c27137beb430dd8f81af469948b3bd1ed40a Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:26 -0400
Subject: [PATCH 17/38] ilot/loomio: new aport

---
 ilot/loomio/APKBUILD             | 199 +++++++++++++++++++++++++++++
 ilot/loomio/bin-wrapper.in       |  15 +++
 ilot/loomio/loomio.confd         |  32 +++++
 ilot/loomio/loomio.initd         |  39 ++++++
 ilot/loomio/loomio.logrotate     |  11 ++
 ilot/loomio/loomio.post-install  |  32 +++++
 ilot/loomio/loomio.post-upgrade  |   1 +
 ilot/loomio/loomio.pre-install   |  26 ++++
 ilot/loomio/loomio.sidekiq.initd |  32 +++++
 ilot/loomio/loomio.vue.initd     |  31 +++++
 10 files changed, 418 insertions(+)
 create mode 100644 ilot/loomio/APKBUILD
 create mode 100644 ilot/loomio/bin-wrapper.in
 create mode 100644 ilot/loomio/loomio.confd
 create mode 100644 ilot/loomio/loomio.initd
 create mode 100644 ilot/loomio/loomio.logrotate
 create mode 100755 ilot/loomio/loomio.post-install
 create mode 120000 ilot/loomio/loomio.post-upgrade
 create mode 100644 ilot/loomio/loomio.pre-install
 create mode 100644 ilot/loomio/loomio.sidekiq.initd
 create mode 100644 ilot/loomio/loomio.vue.initd

diff --git a/ilot/loomio/APKBUILD b/ilot/loomio/APKBUILD
new file mode 100644
index 0000000..38eb631
--- /dev/null
+++ b/ilot/loomio/APKBUILD
@@ -0,0 +1,199 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Jakub Jirutka
+# Contributor: Antoine Martin (ayakael)
+pkgname=loomio
+pkgver=2.21.4
+_gittag=v$pkgver
+pkgrel=0
+pkgdesc="A collaborative decision making tool"
+url="https://github.com/loomio/loomio"
+arch="x86_64"
+license="MIT"
+depends="
+	postgresql
+	postgresql-contrib
+	python3
+	redis
+	ruby3.2
+	ruby3.2-bundler
+	ruby3.2-grpc
+	vips
+	npm
+	procps-ng
+	"
+makedepends="
+	cmd:chrpath
+	ruby3.2-dev
+	nodejs
+	openssl-dev
+	readline-dev
+	zlib-dev
+	libpq-dev
+	libffi-dev
+	imagemagick-dev
+	"
+pkgusers="loomio"
+pkggroups="loomio www-data"
+install="$pkgname.pre-install $pkgname.post-install $pkgname.post-upgrade"
+subpackages="$pkgname-openrc $pkgname-assets:assets"
+source="
+	$pkgname-$pkgver.tar.gz::https://github.com/loomio/loomio/archive/refs/tags/v$pkgver.tar.gz
+	bin-wrapper.in
+	loomio.confd
+	loomio.logrotate
+	loomio.sidekiq.initd
+	loomio.vue.initd
+	loomio.initd
+	"
+_prefix="usr/lib/webapps/loomio"
+
+export BUNDLE_DEPLOYMENT=true
+export BUNDLE_FORCE_RUBY_PLATFORM=true
+export BUNDLE_FROZEN=true
+export BUNDLE_JOBS=${JOBS:-2}
+
+prepare() {
+	local sysgemdir=$(ruby -e 'puts Gem.default_dir')
+
+	default_prepare
+
+	# Allow use of any bundler
+	sed -i -e '/BUNDLED/,+1d' Gemfile.lock
+
+	# Allow use of any platform
+	sed -i -e 's/PLATFORMS/PLATFORMS\n ruby/' Gemfile.lock
+
+	# Some gems are broken, so we copy our fixed version
+	# instead of installing it from RubyGems using Bundler.
+	mkdir -p vendor/gems/grpc/src/ruby/lib/grpc
+	cp -r "$sysgemdir"/gems/grpc-*/* vendor/gems/grpc/
+	cp "$sysgemdir"/specifications/grpc-*.gemspec \
+		vendor/gems/grpc/grpc.gemspec
+	cp "$sysgemdir"/extensions/*/*/grpc-*/grpc/*.so \
+		vendor/gems/grpc/src/ruby/lib/grpc/
+}
+
+build() {
+	local bundle_without='exclude development test'
+
+	bundle config --local build.ffi --enable-system-libffi
+	bundle config --local build.vips --enable-system-libraries
+	bundle config --local build.nokogiri --use-system-libraries \
+		--with-xml2-include=/usr/include/libxml2 \
+		--with-xslt-include=/usr/include/libxslt
+	bundle config --local build.google-protobuf '-- --with-cflags=-D__va_copy=va_copy'
+
+	msg "Installing Ruby gems..."
+	bundle config --local without "$bundle_without"
+	bundle config --local path "vendor/bundle"
+
+	bundle install --no-cache
+
+	msg "Precompiling static assets..."
+	bundle exec bootsnap precompile --gemfile app/ lib/
+
+	# Create executables in bin/*.
+	# See also https://github.com/bundler/bundler/issues/6149.
+	bundle binstubs --force bundler puma sidekiq
+
+	# Remove faulty RPATH.
+	chrpath -d vendor/bundle/ruby/*/gems/*/lib/nokogiri/*/nokogiri.so
+
+	# Copy the prebuilt grpc shared object into the bundled gem.
+	cp vendor/gems/grpc/src/ruby/lib/grpc/grpc_c.so vendor/bundle/ruby/*/gems/grpc*/src/ruby/lib/grpc/.
+	rm -R vendor/bundle/ruby/*/gems/grpc*/src/ruby/lib/grpc/3* vendor/bundle/ruby/*/gems/grpc*/src/ruby/lib/grpc/2*
+
+	msg "Installing npm modules..."
+	cd vue
+	# --force because vite-plugin-yaml hasn't updated its peerDependencies list yet
+	npm ci --force
+	npm run build
+}
+
+package() {
+	local destdir="$pkgdir/$_prefix"
+	local datadir="$pkgdir/var/lib/loomio"
+	local file dest
+
+	# Make directories
+	install -dm 755 \
+		"$(dirname $destdir)" \
+		"$datadir"
+
+	mkdir -p "$(dirname $destdir)"
+	cp -R "$builddir" "$destdir"
+
+	cd "$destdir"/vendor/bundle/ruby/*/
+
+	# Remove tests, documentations and other useless files.
+	find gems/ -maxdepth 2 -type d \
+		\( -name 'doc' -o -name 'spec' -o -name 'test' \) \
+		-exec rm -fr "{}" +
+	find gems/ \( -name 'README*' \
+		-o -name 'CHANGELOG*' \
+		-o -name 'CONTRIBUT*' \
+		-o -name '*LICENSE*' \
+		-o -name 'Rakefile' \
+		-o -name '.*' \) \
+		-type f -delete
+
+	# Remove build logs and cache.
+	rm -rf build_info/ cache/
+	find extensions/ \( -name gem_make.out -o -name mkmf.log \) -delete
+
+	cd "$destdir"
+
+	# Install and symlink config files.
+	for file in database.yml.postgresql puma.rb sidekiq.yml; do
+		dest="$(basename "${file/.postgresql/}")"
+		install -m640 -g loomio -D config/$file "$pkgdir"/etc/loomio/$dest
+		ln -sf /etc/loomio/$dest "$pkgdir"/$_prefix/config/${file/.postgresql/}
+	done
+
+	# This file will be generated by the post-install script; just prepare the symlink.
+	ln -sf /etc/loomio/secrets.yml config/secrets.yml
+	# These env vars shouldn't be necessary (they're all configurable),
+	# but the Omnibus package sets them, so we do the same.
+	cat > "$datadir"/.profile <<-EOF
+		export RAILS_ENV=production
+		export NODE_ENV=production
+		export EXECJS_RUNTIME=Disabled
+	EOF
+
+	# Install wrapper scripts to /usr/bin.
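+	# Each wrapper is bin-wrapper.in with __COMMAND__ substituted (loomio-rake, loomio-rails).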
+ local name; for name in rake rails; do + sed "s/__COMMAND__/$name/g" "$srcdir"/bin-wrapper.in \ + > "$builddir"/loomio-$name + install -m755 -D "$builddir"/loomio-$name "$pkgdir"/usr/bin/loomio-$name + done + + for file in $pkgname $pkgname.sidekiq $pkgname.vue; do + install -m755 -D "$srcdir"/$file.initd "$pkgdir"/etc/init.d/$file + done + + install -m644 -D "$srcdir"/loomio.confd \ + "$pkgdir"/etc/conf.d/loomio + + install -m644 -D "$srcdir"/loomio.logrotate \ + "$pkgdir"/etc/logrotate.d/loomio +} + +assets() { + depends="" + + amove $_prefix/public/assets +} + +sha512sums=" +72a1238c1eaa3b963bd20a09d4fc2e52798264779bdf06d3f32891f2880d246059c77381329d1274bfa5979a35740017f0ced324f88b205369e77335b403ffba loomio-2.21.4.tar.gz +6cd4bb030660a9f4697eeb7c6de3f7509558aab3651e68218583dfeea56634f3b9f58acb50c7c9a4188a38c19434a815dd6c347e30207c4c0ae028c8dcb6ccaf bin-wrapper.in +0f1c91fbd4b8099f0a115705d5af799e4492fa2a0fd54175f3bfbfb5be1122bd7fd73a7709695c7caf2dcc667f3b8715051c24f424472e1115753e43a38fdf50 loomio.confd +1ecb0717cd5f04b894467b21d226b98d8f83b8f62afbf8da7edd57973aeabb13d121e9061cc48aec7572b1c710e82c8b44a1cedc0a924efd4bc4a124b3afe9a8 loomio.logrotate +c5dae2b6f9a23853c3c7ac068d97a7b0269b1775f6e0169c3d8999ec67c2baf3545515ea21037e882d900b15a7abf9061dd5a584bdc82c347b54d8c134f6d7a4 loomio.sidekiq.initd +f774954d8b06aacab27af9593b1b12fbe18ec2d0593dd4f82e4d3dfbc7e325fb1a423347fd974a2ec6665776a6cfe85f255f4fd7493c97eb840f34eb7fbdb329 loomio.vue.initd +645637c4112ec91ec2ea6022713e77a8ee76c0f0a81f9adf1f9210b52a578e94b5b02f0b6244b173905f580f72dc362b5434c714aae11e3619f73af223891bb8 loomio.initd +" diff --git a/ilot/loomio/bin-wrapper.in b/ilot/loomio/bin-wrapper.in new file mode 100644 index 0000000..fad9737 --- /dev/null +++ b/ilot/loomio/bin-wrapper.in @@ -0,0 +1,15 @@ +#!/bin/sh + +BUNDLE_DIR='/usr/lib/webapps/loomio' +export RAILS_ENV='production' +export NODE_ENV='production' +export EXECJS_RUNTIME='Disabled' + +cd $BUNDLE_DIR +install -m 700 -o loomio -g loomio -d "$(readlink ./tmp)" + +if [ "$(id -un)" != 'loomio' ]; then + exec su loomio -c '"$0" "$@"' -- bin/__COMMAND__ "$@" +else + exec bin/__COMMAND__ "$@" +fi diff --git a/ilot/loomio/loomio.confd b/ilot/loomio/loomio.confd new file mode 100644 index 0000000..890ad21 --- /dev/null +++ b/ilot/loomio/loomio.confd @@ -0,0 +1,32 @@ +# Configuration file for /etc/init.d/loomio and +# /etc/init.d/loomio.{vue,sidekiq} + +# Specify how many processes to create using sidekiq-cluster and which queue +# they should handle. Each whitespace-separated item equates to one additional +# Sidekiq process, and comma-separated values in each item determine the queues +# it works on. The special queue name "*" means all queues. +# Example: "* gitlab_shell process_commit,post_receive" +# See https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html. +#sidekiq_queue_groups="*" + +# Maximum threads to use with Sidekiq (default: 50, 0 to disable). +#sidekiq_max_concurrency= + +# Minimum threads to use with Sidekiq (default: 0). +#sidekiq_min_concurrency= + +# The number of seconds to wait between worker checks. +#sidekiq_interval= + +# Graceful timeout for all running processes. +#sidekiq_shutdown_timeout= + +# Run workers for all queues in sidekiq_queues.yml except the given ones. +#sidekiq_negate=no + +# Run workers based on the provided selector. +#sidekiq_queue_selector=no + +# Memory limit (in MiB) for the Sidekiq process. If the RSS (Resident Set Size) +# of the Sidekiq process exceeds this limit, a delayed shutdown is triggered. 
+#sidekiq_memkiller_max_rss=2000 diff --git a/ilot/loomio/loomio.initd b/ilot/loomio/loomio.initd new file mode 100644 index 0000000..864d102 --- /dev/null +++ b/ilot/loomio/loomio.initd @@ -0,0 +1,39 @@ +#!/sbin/openrc-run + +name="Loomio" +description="Meta script for starting/stopping all the Loomio components" +subservices="loomio.sidekiq loomio.vue" + +depend() { + use net +} + +start() { + local ret=0 + + ebegin "Starting all Loomio components" + local svc; for svc in $subservices; do + service $svc start || ret=1 + done + eend $ret +} + +stop() { + local ret=0 + + ebegin "Stopping all Loomio components" + local svc; for svc in $subservices; do + service $svc stop || ret=1 + done + eend $ret +} + +status() { + local ret=0 + + local svc; for svc in $subservices; do + echo "$svc:" + service $svc status || ret=1 + done + eend $ret +} diff --git a/ilot/loomio/loomio.logrotate b/ilot/loomio/loomio.logrotate new file mode 100644 index 0000000..f7fd264 --- /dev/null +++ b/ilot/loomio/loomio.logrotate @@ -0,0 +1,11 @@ +/var/log/loomio/*.log { + compress + copytruncate + delaycompress + maxsize 10M + minsize 1M + missingok + sharedscripts + rotate 10 + weekly +} diff --git a/ilot/loomio/loomio.post-install b/ilot/loomio/loomio.post-install new file mode 100755 index 0000000..2e2fb10 --- /dev/null +++ b/ilot/loomio/loomio.post-install @@ -0,0 +1,32 @@ +#!/bin/sh +set -eu + +group=loomio +config_file='/etc/loomio/config.yml' + +#if [ $(grep '@@SECRET_KEY@@' "$config_file") ]; then +# echo "* Generating random secret in $config_file" >&2 + +# secret_key="$(pwgen -s 50 1)" +# sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file" +#fi + +if [ "${0##*.}" = 'post-upgrade' ]; then + cat >&2 <<-EOF + * + * To finish Loomio upgrade run: + * + * + EOF +else + cat >&2 <<-EOF + * + * 1. Adjust settings in /etc/loomio/config.yml. + * + * 2. Create database for loomio: + * + * psql -c "CREATE ROLE loomio PASSWORD 'top-secret' INHERIT LOGIN;" + * psql -c "CREATE DATABASE loomio OWNER loomio ENCODING 'UTF-8';" + * + EOF +fi diff --git a/ilot/loomio/loomio.post-upgrade b/ilot/loomio/loomio.post-upgrade new file mode 120000 index 0000000..ec5bf9b --- /dev/null +++ b/ilot/loomio/loomio.post-upgrade @@ -0,0 +1 @@ +loomio.post-install \ No newline at end of file diff --git a/ilot/loomio/loomio.pre-install b/ilot/loomio/loomio.pre-install new file mode 100644 index 0000000..612ce4c --- /dev/null +++ b/ilot/loomio/loomio.pre-install @@ -0,0 +1,26 @@ +#!/bin/sh +# It's very important to set user/group correctly. + +loomio_dir='/var/lib/loomio' + +if ! getent group loomio 1>/dev/null; then + echo '* Creating group loomio' 1>&2 + + addgroup -S loomio +fi + +if ! id loomio 2>/dev/null 1>&2; then + echo '* Creating user loomio' 1>&2 + + adduser -DHS -G loomio -h "$loomio_dir" -s /bin/sh \ + -g "added by apk for loomio" loomio + passwd -u loomio 1>/dev/null # unlock +fi + +if ! 
id -Gn loomio | grep -Fq www-data; then
+	echo '* Adding user loomio to group www-data' 1>&2
+
+	addgroup loomio www-data
+fi
+
+exit 0
diff --git a/ilot/loomio/loomio.sidekiq.initd b/ilot/loomio/loomio.sidekiq.initd
new file mode 100644
index 0000000..fd3dd2d
--- /dev/null
+++ b/ilot/loomio/loomio.sidekiq.initd
@@ -0,0 +1,32 @@
+#!/sbin/openrc-run
+
+name="Loomio background workers Service"
+root="/usr/lib/webapps/loomio"
+pidfile="/run/loomio-sidekiq.pid"
+logfile="/var/log/loomio/sidekiq.log"
+
+depend() {
+	use net
+	need redis
+}
+
+start() {
+	ebegin "Starting Loomio background workers"
+
+	cd $root
+
+	start-stop-daemon --start --background \
+		--chdir "${root}" \
+		--user="loomio" \
+		--make-pidfile --pidfile="${pidfile}" \
+		-1 "${logfile}" -2 "${logfile}" \
+		--exec /usr/bin/env -- RAILS_ENV=production bundle exec sidekiq -C config/sidekiq.yml
+	eend $?
+}
+
+stop() {
+	ebegin "Stopping Loomio background workers"
+	start-stop-daemon --stop \
+		--pidfile=${pidfile}
+	eend $?
+}
diff --git a/ilot/loomio/loomio.vue.initd b/ilot/loomio/loomio.vue.initd
new file mode 100644
index 0000000..8fffb40
--- /dev/null
+++ b/ilot/loomio/loomio.vue.initd
@@ -0,0 +1,31 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+cfgfile="/etc/conf.d/$RC_SVCNAME.conf"
+pidfile="/run/$RC_SVCNAME.pid"
+working_directory="/usr/lib/webapps/loomio/vue"
+command="npm"
+command_args="run serve"
+command_user="loomio"
+command_group="loomio"
+start_stop_daemon_args=""
+command_background="yes"
+output_log="/var/log/loomio/$RC_SVCNAME.log"
+error_log="/var/log/loomio/$RC_SVCNAME.err"
+
+depend() {
+	need redis
+	need postgresql
+}
+
+start_pre() {
+	cd "$working_directory"
+	checkpath --directory --owner $command_user:$command_group --mode 0775 \
+		/var/log/loomio \
+		/var/lib/loomio
+}
+
+stop_pre() {
+	ebegin "Killing child processes"
+	kill $(ps -o pid= --ppid $(cat $pidfile)) || true
+}

From 838f490ff0d2bd7b9043fdae932cbc4ea8f46330 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:30 -0400
Subject: [PATCH 18/38] ilot/peertube: new aport

---
 ilot/peertube/APKBUILD              | 82 +++++++++++++++++++++++++++++
 ilot/peertube/peertube-manage.sh    | 14 ++++++
 ilot/peertube/peertube.conf         |  2 +
 ilot/peertube/peertube.openrc       | 34 ++++++++++++
 ilot/peertube/peertube.post-install | 41 +++++++++++++++
 ilot/peertube/peertube.post-upgrade |  1 +
 ilot/peertube/peertube.pre-install  | 25 +++++++++
 7 files changed, 199 insertions(+)
 create mode 100644 ilot/peertube/APKBUILD
 create mode 100644 ilot/peertube/peertube-manage.sh
 create mode 100644 ilot/peertube/peertube.conf
 create mode 100644 ilot/peertube/peertube.openrc
 create mode 100755 ilot/peertube/peertube.post-install
 create mode 120000 ilot/peertube/peertube.post-upgrade
 create mode 100755 ilot/peertube/peertube.pre-install

diff --git a/ilot/peertube/APKBUILD b/ilot/peertube/APKBUILD
new file mode 100644
index 0000000..f50d1c1
--- /dev/null
+++ b/ilot/peertube/APKBUILD
@@ -0,0 +1,82 @@
+# Maintainer: Antoine Martin (ayakael)
+# Contributor: Antoine Martin (ayakael)
+pkgname=peertube
+pkgver=6.0.2
+pkgrel=0
+pkgdesc="ActivityPub-federated video streaming platform using P2P directly in your web browser"
+arch="x86_64"
+url="https://joinpeertube.org/"
+license="AGPL-3.0-only"
+depends="
+	nodejs
+	ffmpeg
+	postgresql
+	openssl
+	redis
+	npm
+	procps-ng
+	"
+makedepends="
+	yarn
+	"
+source="
+	$pkgname-$pkgver.tar.gz::https://github.com/Chocobozzz/PeerTube/archive/refs/tags/v$pkgver.tar.gz
+	peertube-manage.sh
+	peertube.conf
+	peertube.openrc
+	"
+builddir="$srcdir/PeerTube-$pkgver"
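+# (the GitHub tarball unpacks to PeerTube-$pkgver, hence the builddir override above)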
+install="$pkgname.post-install $pkgname.pre-install $pkgname.post-upgrade"
+subpackages="$pkgname-doc $pkgname-openrc"
+
+build() {
+	# dev dependencies are needed to build; prune to production deps afterwards
+	yarn install --pure-lockfile
+	npm run build
+	rm -Rf "$builddir"/node_modules
+	yarn install --production --pure-lockfile
+}
+
+package() {
+	install -dm 755 \
+		"$pkgdir"/usr/share/webapps \
+		"$pkgdir"/usr/share/doc \
+		"$pkgdir"/usr/share/licenses/peertube \
+		"$pkgdir"/etc/init.d \
+		"$pkgdir"/etc/conf.d
+
+	# install
+	cp -a "$builddir" "$pkgdir/usr/share/webapps/peertube"
+
+	# wrapper script
+	install -Dm755 "$srcdir"/peertube-manage.sh "$pkgdir"/usr/bin/peertube-manage
+
+	# openrc
+	install -Dm755 "$srcdir"/peertube.openrc "$pkgdir"/etc/init.d/peertube
+	install -Dm644 "$srcdir"/peertube.conf "$pkgdir"/etc/conf.d/peertube
+
+	# config file setup
+	rm -R "$pkgdir"/usr/share/webapps/peertube/config
+	install -Dm644 "$builddir"/config/production.yaml.example "$pkgdir"/etc/peertube/production.yaml
+	install -Dm644 "$builddir"/config/default.yaml "$pkgdir"/etc/peertube/default.yaml
+	sed -i "s|/var/www/peertube/storage|/var/lib/peertube|g" "$pkgdir"/etc/peertube/production.yaml "$pkgdir"/etc/peertube/default.yaml
+	sed -i "s| tmp:.*| tmp: '/tmp/peertube/'|" "$pkgdir"/etc/peertube/production.yaml "$pkgdir"/etc/peertube/default.yaml
+	sed -i "s|tmp_persistent:.*|tmp_persistent: '/var/tmp/peertube/'|" "$pkgdir"/etc/peertube/production.yaml "$pkgdir"/etc/peertube/default.yaml
+	sed -i "s|logs:.*|logs: '/var/log/peertube/'|" "$pkgdir"/etc/peertube/production.yaml "$pkgdir"/etc/peertube/default.yaml
+	sed -i "s| peertube: ''| peertube: '@@SECRET_KEY@@'|" "$pkgdir"/etc/peertube/production.yaml
+
+	# docs and licenses
+	mv "$pkgdir"/usr/share/webapps/peertube/support/doc "$pkgdir"/usr/share/doc/$pkgname
+	mv "$pkgdir"/usr/share/webapps/peertube/*.md "$pkgdir"/usr/share/doc/peertube/.
+	mv "$pkgdir"/usr/share/webapps/peertube/LICENSE "$pkgdir"/usr/share/licenses/peertube/.
+ + # delete arm64 prebuild + rm "$pkgdir"/usr/share/webapps/$pkgname/node_modules/fs-native-extensions/prebuilds/linux-arm64/node.napi.node +} +sha512sums=" +91bcec34902f171ffe9ab3f27ab4422319f91430cab22965a5cf9887c5293152f7f85c6fc0f355820000daea0a49327aa66f20bb4cff3850e5e3d192f347c926 peertube-6.0.2.tar.gz +92de1155410848937eeff3bef480c4a074875b4236ce0b6bf4cd7213d00173e7766d130408419c85c4432a8445a03f5d4525e4283384d906d781510cc4fd8fc0 peertube-manage.sh +494bb4daf98fcd62b354eb6fae18ccff19bef1243de083a93e438680deef1d9039e30eff8870b6955c3c7b10638e6df6cbeb4fbdb7539979466f502bcc72c843 peertube.conf +5b4d3f47d0dc2ce991971ff61c604a1566811612cff91f7e6ed19b65d0830695649ddef9afff474d916a5e6764d74bb4fa6b5c12eb5e753d8fc381cdd38ab179 peertube.openrc +" diff --git a/ilot/peertube/peertube-manage.sh b/ilot/peertube/peertube-manage.sh new file mode 100644 index 0000000..70bc387 --- /dev/null +++ b/ilot/peertube/peertube-manage.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +BUNDLE_DIR='/usr/share/webapps/peertube' + +cd $BUNDLE_DIR + +if [ "$(id -un)" != 'peertube' ]; then + source /etc/conf.d/peertube + export NODE_ENV NODE_CONFIG_DIR + exec su peertube -c '"$0" "$@"' -- npm run "$@" +else + source /etc/conf.d/peertube + export NODE_ENV NODE_CONFIG_DIR + exec npm run "$@" +fi diff --git a/ilot/peertube/peertube.conf b/ilot/peertube/peertube.conf new file mode 100644 index 0000000..8a7d014 --- /dev/null +++ b/ilot/peertube/peertube.conf @@ -0,0 +1,2 @@ +NODE_CONFIG_DIR=/etc/peertube +NODE_ENV=production diff --git a/ilot/peertube/peertube.openrc b/ilot/peertube/peertube.openrc new file mode 100644 index 0000000..8f03ba0 --- /dev/null +++ b/ilot/peertube/peertube.openrc @@ -0,0 +1,34 @@ +#!/sbin/openrc-run + +name="$RC_SVCNAME" +cfgfile="/etc/conf.d/$RC_SVCNAME.conf" +pidfile="/run/$RC_SVCNAME.pid" +working_directory="/usr/share/webapps/peertube" +command="/usr/bin/node" +command_args="dist/server.js" +command_user="peertube" +command_group="peertube" +start_stop_daemon_args="" +command_background="yes" +output_log="/var/log/peertube/$RC_SVCNAME.log" + +depend() { + need redis + need postgresql +} + +start_pre() { + cd "$working_directory" + checkpath --directory --owner $command_user:$command_group --mode 0775 \ + /var/log/peertube \ + /var/lib/peertube \ + /var/tmp/peertube \ + /tmp/peertube + + export NODE_ENV NODE_CONFIG_DIR +} + +stop_pre() { + ebegin "Killing child processes" + kill $(ps -o pid= --ppid $(cat $pidfile)) || true +} diff --git a/ilot/peertube/peertube.post-install b/ilot/peertube/peertube.post-install new file mode 100755 index 0000000..a83bb10 --- /dev/null +++ b/ilot/peertube/peertube.post-install @@ -0,0 +1,41 @@ +#!/bin/sh +set -eu + +group=www-data +config_file='/etc/peertube/production.yaml' + +if grep '@@SECRET_KEY@@' "$config_file" >/dev/null; then + echo "* Generating random secret in $config_file" >&2 + + secret_key="$(openssl rand -hex 32)" + sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file" +fi + +if [ "${0##*.}" = 'post-upgrade' ]; then + cat >&2 <<-EOF + * + * To finish Peertube upgrade run: + * + * + EOF +else + cat >&2 <<-EOF + * + * 1. Adjust settings in /etc/peertube/production.yaml + * + * 2. Create database for Peertube: + * + * psql -c "CREATE ROLE peertube PASSWORD 'top-secret' INHERIT LOGIN;" + * psql -c "CREATE DATABASE peertube OWNER peertube ENCODING 'UTF-8';" + * + * 3. Start Peertube + * + * service peertube start + * + * 4. 
Create admin user
+	 *
+	 *    peertube-manage reset-password -- -u root
+	 *
+	EOF
+fi
+
diff --git a/ilot/peertube/peertube.post-upgrade b/ilot/peertube/peertube.post-upgrade
new file mode 120000
index 0000000..2dd117d
--- /dev/null
+++ b/ilot/peertube/peertube.post-upgrade
@@ -0,0 +1 @@
+peertube.post-install
\ No newline at end of file
diff --git a/ilot/peertube/peertube.pre-install b/ilot/peertube/peertube.pre-install
new file mode 100755
index 0000000..2572d9c
--- /dev/null
+++ b/ilot/peertube/peertube.pre-install
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+DATADIR='/var/lib/peertube'
+
+if ! getent group peertube 1>/dev/null; then
+	echo '* Creating group peertube' 1>&2
+
+	addgroup -S peertube
+fi
+
+if ! id peertube 2>/dev/null 1>&2; then
+	echo '* Creating user peertube' 1>&2
+
+	adduser -DHS -G peertube -h "$DATADIR" -s /bin/sh \
+		-g "added by apk for peertube" peertube
+	passwd -u peertube 1>/dev/null # unlock
+fi
+
+if ! id -Gn peertube | grep -Fq www-data; then
+	echo '* Adding user peertube to group www-data' 1>&2
+
+	addgroup peertube www-data
+fi
+
+exit 0

From 732a6cc9da6abeb9f5aa4ac5c7f522cc17b4f1f9 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:32 -0400
Subject: [PATCH 19/38] ilot/php82-pecl-inotify: new aport

---
 ilot/php82-pecl-inotify/APKBUILD | 35 ++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 ilot/php82-pecl-inotify/APKBUILD

diff --git a/ilot/php82-pecl-inotify/APKBUILD b/ilot/php82-pecl-inotify/APKBUILD
new file mode 100644
index 0000000..44903a1
--- /dev/null
+++ b/ilot/php82-pecl-inotify/APKBUILD
@@ -0,0 +1,35 @@
+# Contributor: Fabio Ribeiro
+# Maintainer: Andy Postnikov
+pkgname=php82-pecl-inotify
+_extname=inotify
+pkgver=3.0.0
+pkgrel=0
+pkgdesc="Inotify bindings for PHP 8.2"
+url="https://pecl.php.net/package/inotify"
+arch="all"
+license="PHP-3.01"
+depends="php82-common"
+makedepends="php82-dev"
+source="php-pecl-$_extname-$pkgver.tgz::https://pecl.php.net/get/$_extname-$pkgver.tgz"
+builddir="$srcdir"/$_extname-$pkgver
+
+build() {
+	phpize82
+	./configure --prefix=/usr --with-php-config=php-config82
+	make
+}
+
+check() {
+	make NO_INTERACTION=1 REPORT_EXIT_STATUS=1 test
+}
+
+package() {
+	make INSTALL_ROOT="$pkgdir" install
+	local _confdir="$pkgdir"/etc/php82/conf.d
+	install -d $_confdir
+	echo "extension=$_extname" > $_confdir/70_$_extname.ini
+}
+
+sha512sums="
+f8b29f8611f16b92136ab8de89181c254bba1abee1e61cac2344440567a3155aae4b9b54b10fdb1b0254fd7a96da8c14b7dc5c9f7f08a03db30ab1645aca1eee  php-pecl-inotify-3.0.0.tgz
+"

From 1279f9642e80c34bad3a336f8e5b0b1a540e9474 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Fri, 9 Aug 2024 22:28:35 -0400
Subject: [PATCH 20/38] ilot/php83-pecl-inotify: new aport

---
 ilot/php83-pecl-inotify/APKBUILD | 35 ++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 ilot/php83-pecl-inotify/APKBUILD

diff --git a/ilot/php83-pecl-inotify/APKBUILD b/ilot/php83-pecl-inotify/APKBUILD
new file mode 100644
index 0000000..771466f
--- /dev/null
+++ b/ilot/php83-pecl-inotify/APKBUILD
@@ -0,0 +1,35 @@
+# Contributor: Fabio Ribeiro
+# Maintainer: Andy Postnikov
+pkgname=php83-pecl-inotify
+_extname=inotify
+pkgver=3.0.0
+pkgrel=0
+pkgdesc="Inotify bindings for PHP 8.3"
+url="https://pecl.php.net/package/inotify"
+arch="all"
+license="PHP-3.01"
+depends="php83-common"
+makedepends="php83-dev"
+source="php-pecl-$_extname-$pkgver.tgz::https://pecl.php.net/get/$_extname-$pkgver.tgz"
+builddir="$srcdir"/$_extname-$pkgver
+
+build() {
+	phpize83
+	./configure
--prefix=/usr --with-php-config=php-config83 + make +} + +check() { + make NO_INTERACTION=1 REPORT_EXIT_STATUS=1 test +} + +package() { + make INSTALL_ROOT="$pkgdir" install + local _confdir="$pkgdir"/etc/php83/conf.d + install -d $_confdir + echo "extension=$_extname" > $_confdir/70_$_extname.ini +} + +sha512sums=" +f8b29f8611f16b92136ab8de89181c254bba1abee1e61cac2344440567a3155aae4b9b54b10fdb1b0254fd7a96da8c14b7dc5c9f7f08a03db30ab1645aca1eee php-pecl-inotify-3.0.0.tgz +" From 3282acd59f6ceb85fa881c7635d1b5d71952a522 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:37 -0400 Subject: [PATCH 21/38] ilot/py3-django-rest-framework: new aport --- ilot/py3-django-rest-framework/APKBUILD | 59 +++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 ilot/py3-django-rest-framework/APKBUILD diff --git a/ilot/py3-django-rest-framework/APKBUILD b/ilot/py3-django-rest-framework/APKBUILD new file mode 100644 index 0000000..4a82cb3 --- /dev/null +++ b/ilot/py3-django-rest-framework/APKBUILD @@ -0,0 +1,59 @@ +# Contributor: Leonardo Arena +# Contributor: Justin Berthault +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-django-rest-framework +_pkgname=django-rest-framework +pkgver=3.14.0 +pkgrel=0 +pkgdesc="Web APIs for Django" +url="https://github.com/encode/django-rest-framework" +arch="noarch" +license="Custom" +depends=" + py3-django + py3-tz +" +makedepends=" + py3-setuptools + py3-gpep517 + py3-wheel +" +checkdepends=" + py3-pytest-django + py3-pytest-cov + py3-core-api + py3-jinja2 + py3-uritemplate + py3-django-guardian + py3-psycopg2 + py3-markdown + py3-yaml + py3-inflection +" +subpackages="$pkgname-pyc" +source="$pkgname-$pkgver.tar.gz::https://github.com/encode/$_pkgname/archive/$pkgver.tar.gz" +options="!check" # Failing tests +builddir="$srcdir"/$_pkgname-$pkgver + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer "$builddir"/.dist/*.whl + # test_urlpatterns: AssertionError: assert [] is not [] + # test_markdown: rather hard to decipher assertion error + .testenv/bin/python3 -m pytest -v -k 'not test_urlpatterns and not test_markdown' +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +c1012c656b427e0318b2056e2f984ddc75a5b4e85f375c76fba165ad06e285848eee1bc6dc76c097daec57d780efb2551110199d62ce636a03951aec13ab4013 py3-django-rest-framework-3.14.0.tar.gz +" From ee490115e4df7b478be9facf0cb92bf40e8d3cd9 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:40 -0400 Subject: [PATCH 22/38] ilot/py3-django-tenants: new aport --- .../997_update-from-pgclone-schema.patch | 3823 +++++++++++++++++ ilot/py3-django-tenants/APKBUILD | 43 + 2 files changed, 3866 insertions(+) create mode 100644 ilot/py3-django-tenants/997_update-from-pgclone-schema.patch create mode 100644 ilot/py3-django-tenants/APKBUILD diff --git a/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch new file mode 100644 index 0000000..b2999d2 --- /dev/null +++ b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch @@ -0,0 +1,3823 @@ +From 07e14a3442d080bd4e873dc74e441296b8291ae2 Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:26:16 +0100 +Subject: [PATCH 1/3] clone: update from pg-clone-schema + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 
3407 ++++++++++++++++++++++++++++++++++----- + 1 file changed, 2977 insertions(+), 430 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 426e81b8..3afce109 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -6,24 +6,592 @@ + from django_tenants.utils import schema_exists + + CLONE_SCHEMA_FUNCTION = r""" +--- https://github.com/denishpatel/pg-clone-schema/ rev 0d3b522 ++-- https://github.com/denishpatel/pg-clone-schema/ rev 073922e + -- https://github.com/tomturner/django-tenants/issues/322 + +--- Function: clone_schema(text, text, boolean, boolean) ++do $$ ++<> ++DECLARE ++ cnt int; ++BEGIN ++ DROP TYPE IF EXISTS public.cloneparms CASCADE; ++ CREATE TYPE public.cloneparms AS ENUM ('DATA', 'NODATA','DDLONLY','NOOWNER','NOACL','VERBOSE','DEBUG','FILECOPY'); ++ -- END IF; ++end first_block $$; ++ ++ ++-- select * from public.get_insert_stmt_ddl('clone1','sample','address'); ++CREATE OR REPLACE FUNCTION public.get_insert_stmt_ddl( ++ source_schema text, ++ target_schema text, ++ atable text, ++ bTextCast boolean default False ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_insert_ddl text := ''; ++ v_cols text := ''; ++ v_cols_sel text := ''; ++ v_cnt int := 0; ++ v_colrec record; ++ v_schema text; ++ BEGIN ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation, c.is_generated ++ FROM information_schema.columns c WHERE (table_schema, table_name) = (source_schema, atable) ORDER BY ordinal_position ++ LOOP ++ IF v_colrec.udt_schema = 'public' THEN ++ v_schema = 'public'; ++ ELSE ++ v_schema = target_schema; ++ END IF; ++ ++ v_cnt = v_cnt + 1; ++ IF v_colrec.is_identity = 'YES' OR v_colrec.is_generated = 'ALWAYS' THEN ++ -- skip ++ continue; ++ END IF; ++ ++ IF v_colrec.data_type = 'USER-DEFINED' THEN ++ IF v_cols = '' THEN ++ v_cols = v_colrec.column_name; ++ IF bTextCast THEN ++ -- v_cols_sel = v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; ++ IF v_schema = 'public' THEN ++ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ ELSE ++ v_cols_sel = v_colrec.column_name || '::text::' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols = v_cols || ', ' || v_colrec.column_name; ++ IF bTextCast THEN ++ -- v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; ++ IF v_schema = 'public' THEN ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ ELSE ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' 
|| v_colrec.udt_name; ++ END IF; ++ END IF; ++ ELSE ++ IF v_cols = '' THEN ++ v_cols = v_colrec.column_name; ++ v_cols_sel = v_colrec.column_name; ++ ELSE ++ v_cols = v_cols || ', ' || v_colrec.column_name; ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name; ++ END IF; ++ END IF; ++ END LOOP; ++ ++ -- put it all together and return the insert statement ++ -- INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ v_insert_ddl = 'INSERT INTO ' || target_schema || '.' || atable || ' (' || v_cols || ') ' || 'SELECT ' || v_cols_sel || ' FROM ' || source_schema || '.' || atable || ';'; ++ RETURN v_insert_ddl; ++ END; ++$$; ++ ++ ++CREATE OR REPLACE FUNCTION public.get_table_ddl_complex( ++ src_schema text, ++ dst_schema text, ++ in_table text, ++ sq_server_version_num integer ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ v_table_ddl text; ++ v_buffer1 text; ++ v_buffer2 text; ++ ++ BEGIN ++ IF sq_server_version_num < 110000 THEN ++ SELECT 'CREATE TABLE ' ++ || quote_ident(dst_schema) ++ || '.' ++ || pc.relname ++ || E'(\n' ++ || string_agg( ++ pa.attname ++ || ' ' ++ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) ++ || coalesce( ++ ' DEFAULT ' ++ || ( ++ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) ++ FROM pg_catalog.pg_attrdef d ++ WHERE d.adrelid = pa.attrelid ++ AND d.adnum = pa.attnum ++ AND pa.atthasdef ++ ), ++ '' ++ ) ++ || ' ' ++ || CASE pa.attnotnull ++ WHEN TRUE THEN 'NOT NULL' ++ ELSE 'NULL' ++ END, ++ E',\n' ++ ) ++ || coalesce( ++ ( ++ SELECT ++ E',\n' ++ || string_agg( ++ 'CONSTRAINT ' ++ || pc1.conname ++ || ' ' ++ || pg_get_constraintdef(pc1.oid), ++ E',\n' ++ ORDER BY pc1.conindid ++ ) ++ FROM pg_constraint pc1 ++ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. ++ WHERE pc1.conrelid = pa.attrelid ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ ++ ELSE ++ SELECT 'CREATE TABLE ' ++ || quote_ident(dst_schema) ++ || '.' ++ || pc.relname ++ || E'(\n' ++ || string_agg( ++ pa.attname ++ || ' ' ++ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) ++ || coalesce( ++ ' DEFAULT ' ++ || ( ++ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) ++ FROM pg_catalog.pg_attrdef d ++ WHERE d.adrelid = pa.attrelid ++ AND d.adnum = pa.attnum ++ AND pa.atthasdef ++ ), ++ '' ++ ) ++ || ' ' ++ || CASE pa.attnotnull ++ WHEN TRUE THEN 'NOT NULL' ++ ELSE 'NULL' ++ END, ++ E',\n' ++ ) ++ || coalesce( ++ ( ++ SELECT ++ E',\n' ++ || string_agg( ++ 'CONSTRAINT ' ++ || pc1.conname ++ || ' ' ++ || pg_get_constraintdef(pc1.oid), ++ E',\n' ++ ORDER BY pc1.conindid ++ ) ++ FROM pg_constraint pc1 ++ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. 
++ WHERE pc1.conrelid = pa.attrelid AND pc1.conparentid = 0 ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ END IF; ++ ++ -- append partition keyword to it ++ SELECT pg_catalog.pg_get_partkeydef(c.oid::pg_catalog.oid) into v_buffer2 ++ FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relname = quote_ident(in_table) COLLATE pg_catalog.default AND n.nspname = quote_ident(src_schema) COLLATE pg_catalog.default; ++ ++ v_table_ddl := v_buffer1 || ') PARTITION BY ' || v_buffer2 || ';'; ++ ++ RETURN v_table_ddl; ++ END; ++$$; ++ ++ ++-- SELECT * FROM public.get_table_ddl('sample', 'address', True); ++CREATE OR REPLACE FUNCTION public.get_table_ddl( ++ in_schema varchar, ++ in_table varchar, ++ bfkeys boolean ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_table_ddl text; ++ ++ -- data about the target table ++ v_table_oid int; ++ ++ -- records for looping ++ v_colrec record; ++ v_constraintrec record; ++ v_indexrec record; ++ v_primary boolean := False; ++ v_constraint_name text; ++ v_src_path_old text := ''; ++ v_src_path_new text := ''; ++ v_dummy text; ++ v_partbound text; ++ v_pgversion int; ++ v_parent text := ''; ++ v_relopts text := ''; ++ v_tablespace text; ++ v_partition_key text := ''; ++ v_temp text; ++ bPartitioned bool := False; ++ bInheritance bool := False; ++ bRelispartition bool; ++ constraintarr text[] := '{{}}'; ++ constraintelement text; ++ bSkip boolean; ++ ++ BEGIN ++ SELECT c.oid, ( ++ SELECT setting ++ FROM pg_settings ++ WHERE name = 'server_version_num') INTO v_table_oid, v_pgversion ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relkind IN ('r', 'p') ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ ++ -- get user-defined tablespaces if applicable ++ SELECT TABLESPACE INTO v_temp ++ FROM pg_tables ++ WHERE schemaname = in_schema ++ AND tablename = in_table ++ AND TABLESPACE IS NOT NULL; ++ -- Issue#99 Fix: simple coding error! ++ -- IF v_tablespace IS NULL THEN ++ IF v_temp IS NULL THEN ++ v_tablespace := 'TABLESPACE pg_default'; ++ ELSE ++ v_tablespace := 'TABLESPACE ' || v_temp; ++ END IF; ++ -- also see if there are any SET commands for this table, ie, autovacuum_enabled=off, fillfactor=70 ++ WITH relopts AS ( ++ SELECT unnest(c.reloptions) relopts ++ FROM pg_class c, pg_namespace n ++ WHERE n.nspname = in_schema ++ AND n.oid = c.relnamespace ++ AND c.relname = in_table ++ ) ++ SELECT string_agg(r.relopts, ', ') AS relopts INTO v_temp ++ FROM relopts r; ++ IF v_temp IS NULL THEN ++ v_relopts := ''; ++ ELSE ++ v_relopts := ' WITH (' || v_temp || ')'; ++ END IF; ++ ++ -- Issue#61 FIX: set search_path = public before we do anything to force explicit schema qualification but dont forget to set it back before exiting... 
++ SELECT setting INTO v_src_path_old FROM pg_settings WHERE name = 'search_path'; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO v_src_path_old ++ FROM pg_settings ++ WHERE name = 'search_path'; ++ -- RAISE INFO 'DEBUG tableddl: saving old search_path: ***%***', v_src_path_old; ++ EXECUTE 'SET search_path = "public"'; ++ SELECT setting INTO v_src_path_new FROM pg_settings WHERE name = 'search_path'; ++ ++ -- grab the oid of the table; https://www.postgresql.org/docs/8.3/catalog-pg-class.html ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'r' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ -- Dont give up yet. It might be a partitioned table ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'p' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ bPartitioned := True; ++ END IF; ++ IF v_pgversion < 100000 THEN ++ SELECT c2.relname parent INTO v_parent ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ bInheritance := True; ++ END IF; ++ ELSE ++ SELECT c2.relname parent, c1.relispartition, pg_get_expr(c1.relpartbound, c1.oid, TRUE) INTO v_parent, bRelispartition, v_partbound ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ IF bRelispartition THEN ++ bInheritance := False; ++ ELSE ++ bInheritance := True; ++ END IF; ++ END IF; ++ END IF; ++ -- RAISE NOTICE 'version=% schema=% parent=% relopts=% tablespace=% partitioned=% inherited=% relispartition=%',v_pgversion, in_schema, v_parent, v_relopts, v_tablespace, bPartitioned, bInheritance, bRelispartition; ++ ++ -- start the create definition ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' (' || E'\n'; ++ ++ -- define all of the columns in the table; https://stackoverflow.com/a/8153081/3068233 ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation ++ FROM information_schema.columns c ++ WHERE (table_schema, table_name) = (in_schema, in_table) ++ ORDER BY ordinal_position ++ LOOP ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || v_colrec.column_name || ' ' ++ -- FIX #82, FIX #100 as well by adding 'citext' to the list ++ -- FIX #105 by overriding the previous fixes (#82, #100), which presumed "public" was always the schema for extensions. It could be a custom schema. 
++ -- so assume udt_schema for all USER-DEFINED datatypes ++ -- || CASE WHEN v_colrec.udt_name in ('geometry', 'box2d', 'box2df', 'box3d', 'geography', 'geometry_dump', 'gidx', 'spheroid', 'valid_detail','citext') ++ -- THEN v_colrec.udt_name ++ || CASE WHEN v_colrec.data_type = 'USER-DEFINED' ++ -- THEN in_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ THEN v_colrec.udt_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ || CASE WHEN v_colrec.is_identity = 'YES' ++ THEN ++ CASE WHEN v_colrec.identity_generation = 'ALWAYS' ++ THEN ' GENERATED ALWAYS AS IDENTITY' ELSE ' GENERATED BY DEFAULT AS IDENTITY' END ELSE '' END ++ || CASE WHEN v_colrec.character_maximum_length IS NOT NULL ++ THEN ('(' || v_colrec.character_maximum_length || ')') ++ WHEN v_colrec.numeric_precision > 0 AND v_colrec.numeric_scale > 0 ++ THEN '(' || v_colrec.numeric_precision || ',' || v_colrec.numeric_scale || ')' ++ ELSE '' END || ' ' ++ || CASE WHEN v_colrec.is_nullable = 'NO' ++ THEN 'NOT NULL' ELSE 'NULL' END ++ || CASE WHEN v_colrec.column_default IS NOT null ++ THEN (' DEFAULT ' || v_colrec.column_default) ELSE '' END ++ || ',' || E'\n'; ++ END LOOP; ++ -- define all the constraints in the; https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html && https://dba.stackexchange.com/a/214877/75296 ++ -- Issue#103: do not get foreign keys for partitions since they are defined on the parent and this will cause an "already exists" error otherwise ++ -- Also conparentid is not in V10, so bypass since we do not have FKEYS in partitioned tables in V10 ++ IF v_pgversion < 110000 THEN ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ ELSE ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ -- Issue#103: 
do not get partitioned tables ++ AND con.conparentid = 0 ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ END IF; ++ ++ -- drop the last comma before ending the create statement ++ v_table_ddl = substr(v_table_ddl, 0, length(v_table_ddl) - 1) || E'\n'; ++ -- end the create table def but add inherits clause if valid ++ IF bPartitioned and bInheritance THEN ++ v_table_ddl := v_table_ddl || ') INHERITS (' || in_schema || '.' || v_parent || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSIF v_pgversion >= 100000 AND bPartitioned and NOT bInheritance THEN ++ -- See if this is a partitioned table (pg_class.relkind = 'p') and add the partitioned key ++ SELECT pg_get_partkeydef (c1.oid) AS partition_key INTO v_partition_key ++ FROM pg_class c1 ++ JOIN pg_namespace n ON (n.oid = c1.relnamespace) ++ LEFT JOIN pg_partitioned_table p ON (c1.oid = p.partrelid) ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.relkind = 'p'; ++ END IF; ++ IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN ++ -- add partition clause ++ -- NOTE: cannot specify default tablespace for partitioned relations ++ v_table_ddl := v_table_ddl || ') PARTITION BY ' || v_partition_key || ';' || E'\n'; ++ ELSIF bPartitioned AND not bInheritance THEN ++ IF v_relopts <> '' THEN ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\n'; ++ ELSE ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' 
|| v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\n'; ++ END IF; ++ ELSIF bPartitioned and bInheritance THEN ++ -- we already did this above ++ v_table_ddl := v_table_ddl; ++ ELSIF v_relopts <> '' THEN ++ v_table_ddl := v_table_ddl || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSE ++ v_table_ddl := v_table_ddl || ') ' || v_tablespace || ';' || E'\n'; ++ END IF; ++ -- suffix create statement with all of the indexes on the table ++ FOR v_indexrec IN ++ SELECT indexdef, indexname ++ FROM pg_indexes ++ WHERE (schemaname, tablename) = (in_schema, in_table) ++ LOOP ++ -- Issue#83 fix: loop through constraints and skip ones already defined ++ bSkip = False; ++ FOREACH constraintelement IN ARRAY constraintarr ++ LOOP ++ IF constraintelement = v_indexrec.indexname THEN ++ bSkip = True; ++ EXIT; ++ END IF; ++ END LOOP; ++ if bSkip THEN CONTINUE; END IF; ++ v_table_ddl := v_table_ddl ++ || v_indexrec.indexdef ++ || ';' || E'\n'; ++ END LOOP; ++ ++ -- reset search_path back to what it was ++ IF v_src_path_old = '' THEN ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ EXECUTE 'SET search_path = ' || v_src_path_old; ++ END IF; ++ -- RAISE NOTICE 'DEBUG tableddl: reset search_path back to ***%***', v_src_path_old; ++ ++ -- return the ddl ++ RETURN v_table_ddl; ++ END; ++$$; + +--- DROP FUNCTION clone_schema(text, text, boolean, boolean); + ++-- Function: clone_schema(text, text, boolean, boolean, boolean) ++-- DROP FUNCTION clone_schema(text, text, boolean, boolean, boolean); ++-- DROP FUNCTION IF EXISTS public.clone_schema(text, text, boolean, boolean); ++ ++DROP FUNCTION IF EXISTS public.clone_schema(text, text, cloneparms[]); + CREATE OR REPLACE FUNCTION public.clone_schema( + source_schema text, + dest_schema text, +- include_recs boolean, +- ddl_only boolean) ++ VARIADIC arr public.cloneparms[] DEFAULT '{{}}':: public.cloneparms[]) + RETURNS void AS + $BODY$ + + -- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one + -- SAMPLE CALL: +--- SELECT clone_schema('public', 'new_schema', True, False); ++-- SELECT clone_schema('sample', 'sample_clone2'); + + DECLARE + src_oid oid; +@@ -32,20 +600,37 @@ + object text; + buffer text; + buffer2 text; ++ buffer3 text; + srctbl text; ++ aname text; + default_ text; + column_ text; + qry text; + ix_old_name text; + ix_new_name text; ++ relpersist text; ++ udt_name text; ++ udt_schema text; ++ bRelispart bool; ++ bChild bool; ++ relknd text; ++ data_type text; ++ ocomment text; ++ adef text; + dest_qry text; + v_def text; ++ part_range text; + src_path_old text; ++ src_path_new text; + aclstr text; ++ -- issue#80 initialize arrays properly ++ tblarray text[] := '{{}}'; ++ tblarray2 text[] := '{{}}'; ++ tblarray3 text[] := '{{}}'; ++ tblelement text; + grantor text; + grantee text; + privs text; +- records_count bigint; + seqval bigint; + sq_last_value bigint; + sq_max_value bigint; +@@ -53,16 +638,28 @@ + sq_increment_by bigint; + sq_min_value bigint; + sq_cache_value bigint; +- sq_is_called boolean; ++ sq_is_called boolean := True; + sq_is_cycled boolean; ++ is_prokind boolean; ++ abool boolean; + sq_data_type text; + sq_cycled char(10); ++ sq_owned text; ++ sq_version text; ++ sq_server_version text; ++ sq_server_version_num integer; ++ bWindows boolean; + arec RECORD; + cnt integer; ++ cnt1 integer; + cnt2 integer; +- seq_cnt integer; ++ cnt3 integer; ++ cnt4 integer; + pos integer; ++ tblscopied integer := 0; ++ l_child integer; + action 
text := 'N/A'; ++ tblname text; + v_ret text; + v_diag1 text; + v_diag2 text; +@@ -70,48 +667,209 @@ + v_diag4 text; + v_diag5 text; + v_diag6 text; ++ v_dummy text; ++ spath text; ++ spath_tmp text; ++ -- issue#86 fix ++ isGenerated text; ++ ++ -- issue#91 fix ++ tblowner text; ++ func_owner text; ++ func_name text; ++ func_args text; ++ func_argno integer; ++ view_owner text; ++ ++ -- issue#92 ++ calleruser text; ++ ++ -- issue#94 ++ bData boolean := False; ++ bDDLOnly boolean := False; ++ bVerbose boolean := False; ++ bDebug boolean := False; ++ bNoACL boolean := False; ++ bNoOwner boolean := False; ++ arglen integer; ++ vargs text; ++ avarg public.cloneparms; ++ ++ -- issue#98 ++ mvarray text[] := '{{}}'; ++ mvscopied integer := 0; ++ ++ -- issue#99 tablespaces ++ tblspace text; ++ ++ -- issue#101 ++ bFileCopy boolean := False; ++ ++ t timestamptz := clock_timestamp(); ++ r timestamptz; ++ s timestamptz; ++ lastsql text := ''; ++ v_version text := '1.19 September 07, 2023'; + + BEGIN ++ -- Make sure NOTICE are shown ++ SET client_min_messages = 'notice'; ++ RAISE NOTICE 'clone_schema version %', v_version; ++ ++ IF 'DEBUG' = ANY ($3) THEN bDebug = True; END IF; ++ IF 'VERBOSE' = ANY ($3) THEN bVerbose = True; END IF; ++ ++ -- IF bVerbose THEN RAISE NOTICE 'START: %',clock_timestamp() - t; END IF; ++ ++ arglen := array_length($3, 1); ++ IF arglen IS NULL THEN ++ -- nothing to do, so defaults are assumed ++ NULL; ++ ELSE ++ -- loop thru args ++ -- IF 'NO_TRIGGERS' = ANY ($3) ++ -- select array_to_string($3, ',', '***') INTO vargs; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arguments=%', $3; END IF; ++ FOREACH avarg IN ARRAY $3 LOOP ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arg=%', avarg; END IF; ++ IF avarg = 'DATA' THEN ++ bData = True; ++ ELSEIF avarg = 'NODATA' THEN ++ -- already set to that by default ++ bData = False; ++ ELSEIF avarg = 'DDLONLY' THEN ++ bDDLOnly = True; ++ ELSEIF avarg = 'NOACL' THEN ++ bNoACL = True; ++ ELSEIF avarg = 'NOOWNER' THEN ++ bNoOwner = True; ++ -- issue#101 fix ++ ELSEIF avarg = 'FILECOPY' THEN ++ bFileCopy = True; ++ END IF; ++ END LOOP; ++ IF bData and bDDLOnly THEN ++ RAISE WARNING 'You can only specify DDLONLY or DATA, but not both.'; ++ RETURN; ++ END IF; ++ END IF; ++ ++ -- Get server version info to handle certain things differently based on the version. ++ SELECT setting INTO sq_server_version ++ FROM pg_settings ++ WHERE name = 'server_version'; ++ SELECT version() INTO sq_version; ++ ++ IF POSITION('compiled by Visual C++' IN sq_version) > 0 THEN ++ bWindows = True; ++ RAISE NOTICE 'Windows: %', sq_version; ++ ELSE ++ bWindows = False; ++ RAISE NOTICE 'Linux: %', sq_version; ++ END IF; ++ SELECT setting INTO sq_server_version_num ++ FROM pg_settings ++ WHERE name = 'server_version_num'; ++ ++ IF sq_server_version_num < 100000 THEN ++ IF sq_server_version_num > 90600 THEN ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. Will try however for PG 9.6...', sq_server_version, sq_server_version_num; ++ ELSE ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. 
You need to be at minimum version 9.6 to at least try', sq_server_version, sq_server_version_num; ++ RETURN; ++ END IF; ++ END IF; + + -- Check that source_schema exists + SELECT oid INTO src_oid +- FROM pg_namespace +- WHERE nspname = quote_ident(source_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(source_schema); ++ + IF NOT FOUND + THEN +- RAISE NOTICE 'source schema % does not exist!', source_schema; ++ RAISE NOTICE ' source schema % does not exist!', source_schema; + RETURN ; + END IF; + ++ -- Check for case-sensitive target schemas and reject them for now. ++ SELECT lower(dest_schema) = dest_schema INTO abool; ++ IF not abool THEN ++ RAISE NOTICE 'Case-sensitive target schemas are not supported at this time.'; ++ RETURN; ++ END IF; ++ + -- Check that dest_schema does not yet exist + PERFORM nspname +- FROM pg_namespace +- WHERE nspname = quote_ident(dest_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(dest_schema); ++ + IF FOUND + THEN +- RAISE NOTICE 'dest schema % already exists!', dest_schema; ++ RAISE NOTICE ' dest schema % already exists!', dest_schema; + RETURN ; + END IF; +- IF ddl_only and include_recs THEN ++ IF bDDLOnly and bData THEN + RAISE WARNING 'You cannot specify to clone data and generate ddl at the same time.'; + RETURN ; + END IF; + ++ -- Issue#92 ++ SELECT current_user into calleruser; ++ + -- Set the search_path to source schema. Before exiting set it back to what it was before. +- SELECT setting INTO src_path_old FROM pg_settings WHERE name='search_path'; ++ -- In order to avoid issues with the special schema name "$user" that may be ++ -- returned unquoted by some applications, we ensure it remains double quoted. ++ -- MJV FIX: #47 ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO src_path_old ++ FROM pg_settings WHERE name = 'search_path'; ++ ++ IF bDebug THEN RAISE NOTICE 'DEBUG: src_path_old=%', src_path_old; END IF; ++ + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- -- RAISE NOTICE 'Using source search_path=%', buffer; ++ SELECT setting INTO src_path_new FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: new search_path=%', src_path_new; END IF; + + -- Validate required types exist. If not, create them. 
+- select a.objtypecnt, b.permtypecnt INTO cnt, cnt2 FROM +- (SELECT count(*) as objtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, +- (SELECT count(*) as permtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ SELECT a.objtypecnt, b.permtypecnt INTO cnt, cnt2 ++ FROM ( ++ SELECT count(*) AS objtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, ( ++ SELECT count(*) AS permtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ + IF cnt = 0 THEN + CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE'); + END IF; +@@ -119,53 +877,148 @@ + CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY'); + END IF; + +- IF ddl_only THEN +- RAISE NOTICE 'Only generating DDL, not actually creating anything...'; ++ -- Issue#95 ++ SELECT pg_catalog.pg_get_userbyid(nspowner) INTO buffer FROM pg_namespace WHERE nspname = quote_ident(source_schema); ++ ++ IF bDDLOnly THEN ++ RAISE NOTICE ' Only generating DDL, not actually creating anything...'; ++ -- issue#95 ++ IF bNoOwner THEN ++ RAISE INFO 'CREATE SCHEMA %;', quote_ident(dest_schema); ++ ELSE ++ RAISE INFO 'CREATE SCHEMA % AUTHORIZATION %;', quote_ident(dest_schema), buffer; ++ END IF; ++ RAISE NOTICE 'SET search_path=%;', quote_ident(dest_schema); ++ ELSE ++ -- issue#95 ++ IF bNoOwner THEN ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ ELSE ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) || ' AUTHORIZATION ' || buffer; ++ END IF; + END IF; + +- IF ddl_only THEN +- RAISE NOTICE '%', 'CREATE SCHEMA ' || quote_ident(dest_schema); ++ -- Do system table validations for subsequent system table queries ++ -- Issue#65 Fix ++ SELECT count(*) into cnt ++ 
FROM pg_attribute ++ WHERE attrelid = 'pg_proc'::regclass AND attname = 'prokind'; ++ ++ IF cnt = 0 THEN ++ is_prokind = False; + ELSE +- EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ is_prokind = True; + END IF; + + -- MV: Create Collations + action := 'Collations'; + cnt := 0; +- FOR arec IN +- SELECT n.nspname as schemaname, a.rolname as ownername , c.collname, c.collprovider, c.collcollate as locale, +- 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ', locale = ''' || c.collcollate || ''');' as COLL_DDL +- FROM pg_collation c JOIN pg_namespace n ON (c.collnamespace = n.oid) JOIN pg_roles a ON (c.collowner = a.oid) WHERE n.nspname = quote_ident(source_schema) order by c.collname +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.coll_ddl; +- ELSE +- EXECUTE arec.coll_ddl; +- END IF; +- END; +- END LOOP; ++ -- Issue#96 Handle differently based on PG Versions (PG15 rely on colliculocale, not collcolocate) ++ -- perhaps use this logic instead: COALESCE(c.collcollate, c.colliculocale) AS lc_collate, COALESCE(c.collctype, c.colliculocale) AS lc_type ++ IF sq_server_version_num > 150000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.colliculocale || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSIF sq_server_version_num > 100000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSE ++ -- handle 9.6 that is missing some columns in pg_collation ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ END IF; + RAISE NOTICE ' COLLATIONS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: 
Create Domains + action := 'Domains'; + cnt := 0; + FOR arec IN +- SELECT n.nspname as "Schema", t.typname as "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "Type", +- (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt WHERE c.oid = t.typcollation AND +- bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as "Collation", +- CASE WHEN t.typnotnull THEN 'not null' END as "Nullable", t.typdefault as "Default", +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') as "Check", +- 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || +- CASE WHEN t.typnotnull IS NOT NULL THEN ' NOT NULL ' ELSE ' ' END || CASE WHEN t.typdefault IS NOT NULL THEN 'DEFAULT ' || t.typdefault || ' ' ELSE ' ' END || +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL +- FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2 ++ SELECT n.nspname AS "Schema", t.typname AS "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) AS "Type", ( ++ SELECT c.collname ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt ++ WHERE c.oid = t.typcollation ++ AND bt.oid = t.typbasetype ++ AND t.typcollation <> bt.typcollation) AS "Collation", CASE WHEN t.typnotnull THEN ++ 'not null' ++ END AS "Nullable", t.typdefault AS "Default", pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.typename ++ WHERE t.oid = r.contypid), ' ') AS "Check", 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || quote_ident(t.typname) || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || ++ CASE WHEN t.typnotnull IS NOT NULL THEN ++ ' NOT NULL ' ++ ELSE ++ ' ' ++ END || CASE WHEN t.typdefault IS NOT NULL THEN ++ 'DEFAULT ' || t.typdefault || ' ' ++ ELSE ++ ' ' ++ END || pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE t.typtype = 'd' ++ AND n.nspname = quote_ident(source_schema) ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ ORDER BY 1, 2 + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.dom_ddl; + ELSE + EXECUTE arec.dom_ddl; +@@ -177,36 +1030,70 @@ + -- MV: Create types + action := 'Types'; + cnt := 0; ++ lastsql = ''; + FOR arec IN +- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, CASE WHEN t.typcategory='C' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ORDER BY c.relname, a.attnum),', ') || ');' +- WHEN t.typcategory='E' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' 
|| t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder),',')), ',', ''',''') || ');' +- ELSE '' END AS type_ddl FROM pg_type t JOIN pg_namespace n ON (n.oid = t.typnamespace) +- LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) +- LEFT JOIN pg_class c ON (c.reltype = t.oid) LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) +- WHERE n.nspname = quote_ident(source_schema) and (c.relkind IS NULL or c.relkind = 'c') and t.typcategory in ('C', 'E') group by 1,2,3,4 order by n.nspname, t.typcategory, t.typname ++ -- Fixed Issue#108:enclose double-quote roles with special characters for setting "OWNER TO" ++ -- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, pg_catalog.pg_get_userbyid(t.typowner) AS owner, CASE WHEN t.typcategory = 'C' THEN ++ SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, '"' || pg_catalog.pg_get_userbyid(t.typowner) || '"' AS owner, CASE WHEN t.typcategory = 'C' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ++ ORDER BY c.relname, a.attnum), ', ') || ');' ++ WHEN t.typcategory = 'E' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder), ',')), ',', ''',''') || ');' ++ ELSE ++ '' ++ END AS type_ddl ++ FROM pg_type t ++ JOIN pg_namespace n ON (n.oid = t.typnamespace) ++ LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) ++ LEFT JOIN pg_class c ON (c.reltype = t.oid) ++ LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ AND (c.relkind IS NULL ++ OR c.relkind = 'c') ++ AND t.typcategory IN ('C', 'E') ++ GROUP BY 1, 2, 3, 4, 5 ++ ORDER BY n.nspname, t.typcategory, t.typname ++ + LOOP + BEGIN + cnt := cnt + 1; + -- Keep composite and enum types in separate branches for fine tuning later if needed. + IF arec.typcategory = 'E' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN +- RAISE INFO '%', arec.type_ddl; +- ELSE +- EXECUTE arec.type_ddl; +- END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.type_ddl; ++ ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; ++ ELSE ++ EXECUTE arec.type_ddl; + +- ELSEIF arec.typcategory = 'C' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' || arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; ++ END IF; ++ ELSIF arec.typcategory = 'C' THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; + ELSE + EXECUTE arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' 
|| arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; + END IF; + ELSE +- RAISE NOTICE 'Unhandled type:%-%', arec.typcategory, arec.typname; ++ RAISE NOTICE ' Unhandled type:%-%', arec.typcategory, arec.typname; + END IF; + END; + END LOOP; +@@ -214,82 +1101,361 @@ + + -- Create sequences + action := 'Sequences'; +- seq_cnt := 0; +- -- TODO: Find a way to make this sequence's owner is the correct table. +- FOR object IN +- SELECT sequence_name::text +- FROM information_schema.sequences +- WHERE sequence_schema = quote_ident(source_schema) ++ ++ cnt := 0; ++ -- fix#63 get from pg_sequences not information_schema ++ -- fix#63 take 2: get it from information_schema.sequences since we need to treat IDENTITY columns differently. ++ -- fix#95 get owner as well by joining to pg_sequences ++ -- fix#106 we can get owner info with pg_class, pg_user/pg_group, and information_schema.sequences, so we can avoid the hit to pg_sequences which is not available in 9.6 ++ FOR object, buffer IN ++ -- Fixed Issue#108: ++ -- SELECT s1.sequence_name::text, s2.sequenceowner FROM information_schema.sequences s1 JOIN pg_sequences s2 ON (s1.sequence_schema = s2.schemaname AND s1.sequence_name = s2.sequencename) AND s1.sequence_schema = quote_ident(source_schema) ++ -- SELECT s.sequence_name::text, '"' || u.usename || '"' as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_user u ON (c.relowner = u.usesysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ -- UNION SELECT s.sequence_name::text, g.groname as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_group g ON (c.relowner = g.grosysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ SELECT sequencename::text, sequenceowner FROM pg_catalog.pg_sequences WHERE schemaname = quote_ident(source_schema) + LOOP +- seq_cnt := seq_cnt + 1; +- IF ddl_only THEN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ -- issue#95 + RAISE INFO '%', 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';'; ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO '%', 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer || ';'; ++ END IF; + ELSE + EXECUTE 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object); ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer; ++ END IF; + END IF; + srctbl := quote_ident(source_schema) || '.' || quote_ident(object); + +- EXECUTE 'SELECT last_value, is_called +- FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';' +- INTO sq_last_value, sq_is_called; +- +- EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type +- FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' +- INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type ; ++ IF sq_server_version_num < 100000 THEN ++ EXECUTE 'SELECT last_value, is_called FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(object) || ';' INTO sq_last_value, sq_is_called; ++ EXECUTE 'SELECT maximum_value, start_value, increment, minimum_value, 1 cache_size, cycle_option, data_type ++ FROM information_schema.sequences WHERE sequence_schema='|| quote_literal(source_schema) || ' AND sequence_name=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- IF sq_is_cycled +- THEN +- sq_cycled := 'CYCLE'; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; + ELSE +- sq_cycled := 'NO CYCLE'; +- END IF; ++ EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type, COALESCE(last_value, 1) ++ FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type, sq_last_value; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) +- || ' AS ' || sq_data_type +- || ' INCREMENT BY ' || sq_increment_by +- || ' MINVALUE ' || sq_min_value +- || ' MAXVALUE ' || sq_max_value +- || ' START WITH ' || sq_start_value +- || ' RESTART ' || sq_min_value +- || ' CACHE ' || sq_cache_value +- || ' ' || sq_cycled || ' ;' ; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' AS ' || sq_data_type ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; ++ END IF; + +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- IF include_recs THEN ++ IF bData THEN + EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- if ddl_only THEN +- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ if bDDLOnly THEN ++ -- fix#63 ++ -- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ -- fix#63 ++ -- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + END IF; + + END IF; + END LOOP; +- RAISE NOTICE ' SEQUENCES cloned: %', LPAD(seq_cnt::text, 5, ' '); ++ RAISE NOTICE ' SEQUENCES cloned: %', LPAD(cnt::text, 5, ' '); ++ + +--- Create tables ++ -- Create tables including partitioned ones (parent/children) and unlogged ones. Order by is critical since child partition range logic is dependent on it. + action := 'Tables'; +- cnt := 0; +- FOR object IN +- SELECT TABLE_NAME::text +- FROM information_schema.tables +- WHERE table_schema = quote_ident(source_schema) +- AND table_type = 'BASE TABLE' ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; + ++ cnt := 0; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path to empty string:%', v_dummy; END IF; ++ -- Fix#86 add isgenerated to column list ++ -- Fix#91 add tblowner for setting the table ownership to that of the source ++ -- Fix#99 added join to pg_tablespace ++ ++ -- Handle PG versions greater than last major/minor version of PG 9.6.24 ++ IF sq_server_version_num > 90624 THEN ++ FOR tblname, relpersist, bRelispart, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- fixed #108 by enclosing owner in double quotes to avoid errors for bad characters like #.@... 
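-- Illustrative sketch, not part of the patch: the Issue#108 change above wraps the owner returned
-- by pg_get_userbyid() in double quotes because a role name containing identifier-breaking
-- characters cannot appear unquoted in the generated OWNER TO statements. With a hypothetical
-- role "dev#ops" (all names below are made up for illustration):
CREATE ROLE "dev#ops";
CREATE TABLE owner_demo (id int);
-- ALTER TABLE owner_demo OWNER TO dev#ops;   -- fails: '#' cannot appear in an unquoted identifier
ALTER TABLE owner_demo OWNER TO "dev#ops";    -- succeeds once the role is double-quoted
DROP TABLE owner_demo;
DROP ROLE "dev#ops";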
++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' || quote_ident(object); +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ lastsql = ''; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ bChild := True; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=% relkind=% bChild=%',tblname, bRelispart, relknd, bChild; END IF; ++ ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; ++ END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild OR bRelispart) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
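-- Illustrative sketch, not part of the patch: the child-table loop below emits
-- ALTER TABLE ONLY <parent> ATTACH PARTITION <child> <bound>, taking the bound expression
-- verbatim from pg_catalog.pg_get_expr(c1.relpartbound, c1.oid). Against a hypothetical
-- range-partitioned clone (schema and table names are made up), the generated statements
-- have this shape:
CREATE SCHEMA clone_demo;
CREATE TABLE clone_demo.measurement (ts timestamptz NOT NULL, val int)
    PARTITION BY RANGE (ts);
CREATE TABLE clone_demo.measurement_y2024 (ts timestamptz NOT NULL, val int);
ALTER TABLE ONLY clone_demo.measurement
    ATTACH PARTITION clone_demo.measurement_y2024
    FOR VALUES FROM ('2024-01-01') TO ('2025-01-01');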
The problem was that regression testing previously only worked on one partition table clone case ++ AND c2.relname = tblname AND ++ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', ++ c1.oid::pg_catalog.regclass::pg_catalog.text ++ LOOP ++ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; ++ -- issue#91, not sure if we need to do this for child tables ++ -- issue#95 we don't set ownership here ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ ELSE ++ EXECUTE qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ END IF; ++ END LOOP; + END IF; + + -- INCLUDING ALL creates new index names, we restore them to the old name. +@@ -300,171 +1466,975 @@ + WHERE old.schemaname = source_schema + AND new.schemaname = dest_schema + AND old.tablename = new.tablename +- AND old.tablename = object ++ AND old.tablename = tblname + AND old.indexname <> new.indexname + AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') +- ORDER BY old.indexname, new.indexname ++ ORDER BY old.indexdef, new.indexdef + LOOP +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; + ELSE +- EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ -- The SELECT query above may return duplicate names when a column is ++ -- indexed twice in the same manner with 2 different names. Therefore, to ++ -- avoid a 'relation "xxx" already exists' we test if the index name ++ -- is in use or free. Skipping an existing index will fall back on unused ++ -- ones and every duplicate will be mapped to distinct old names. ++ IF NOT EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_old_name)) ++ AND EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_new_name)) ++ THEN ++ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ END IF; + END IF; + END LOOP; + +- records_count := 0; +- IF include_recs +- THEN ++ lastsql = ''; ++ IF bData THEN + -- Insert records from source table +- RAISE NOTICE 'Populating cloned table, %', buffer; +- EXECUTE 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';'; +- +- -- restart the counter for PK's internal identity sequence +- EXECUTE 'SELECT count(*) FROM ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';' INTO records_count; +- FOR column_ IN +- SELECT column_name::text +- FROM information_schema.columns +- WHERE +- table_schema = dest_schema AND +- table_name = object AND +- is_identity = 'YES' +- LOOP +- EXECUTE 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' ALTER COLUMN ' || quote_ident(column_) || ' RESTART WITH ' || records_count + 1 || ';'; +- END LOOP; ++ ++ -- 2021-03-03 MJV FIX ++ buffer := quote_ident(dest_schema) || '.'
|| quote_ident(tblname); ++ ++ -- 2020/06/18 - Issue #31 fix: add "OVERRIDING SYSTEM VALUE" for IDENTITY columns marked as GENERATED ALWAYS. ++ select count(*) into cnt2 from pg_class c, pg_attribute a, pg_namespace n ++ where a.attrelid = c.oid and c.relname = quote_ident(tblname) and n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and a.attidentity = 'a'; ++ buffer3 := ''; ++ IF cnt2 > 0 THEN ++ buffer3 := ' OVERRIDING SYSTEM VALUE'; ++ END IF; ++ -- BUG for inserting rows from tables with user-defined columns ++ -- INSERT INTO sample_clone.address OVERRIDING SYSTEM VALUE SELECT * FROM sample.address; ++ -- ERROR: column "id2" is of type sample_clone.udt_myint but expression is of type udt_myint ++ ++ -- Issue#86 fix: ++ -- IF data_type = 'USER-DEFINED' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=% buffer3=%', tblname, data_type, isGenerated, buffer3; END IF; ++ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN ++ ++ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. You must copy them manually.', tblname; ++ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; ++ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ ++ -- Issue#79 implementation follows ++ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; ++ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); ++ -- Issue#101 fix: use text cast to get around the problem. ++ IF bFileCopy THEN ++ IF bWindows THEN ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ END IF; ++ ELSE ++ -- Issue#101: assume direct copy with text cast, add to separate array ++ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); ++ tblarray3 := tblarray3 || buffer3; ++ END IF; ++ ELSE ++ -- bypass child tables since we populate them when we populate the parents ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=% relknd=% l_child=% bChild=%', tblname, bRelispart, relknd, l_child, bChild; END IF; ++ IF NOT bRelispart AND NOT bChild THEN ++ -- Issue#75: Must defer population of tables until child tables have been added to parents ++ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. ++ -- Issue#106 buffer3 shouldn't be in the mix ++ -- revisited: buffer3 should be in play for PG versions that handle IDENTITIES ++ buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ -- buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; ++ IF bFileCopy THEN ++ tblarray2:= tblarray2 || buffer2; ++ ELSE ++ tblarray := tblarray || buffer2; ++ END IF; ++ END IF; ++ END IF; + END IF; + +- SET search_path = ''; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ + FOR column_, default_ IN + SELECT column_name::text, +- REPLACE(column_default::text, source_schema, dest_schema) +- FROM information_schema.COLUMNS +- WHERE table_schema = source_schema +- AND TABLE_NAME = object +- AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' ++ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') ++ FROM information_schema.COLUMNS ++ WHERE table_schema = source_schema ++ AND TABLE_NAME = tblname ++ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' + LOOP +- IF ddl_only THEN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name ++ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; ++ IF bDDLOnly THEN + -- May need to come back and revisit this since previous sql will not return anything since no schema as created! +- RAISE INFO '%', 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_ || ';'; ++ RAISE INFO '%', buffer2; + ELSE +- EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_; ++ EXECUTE buffer2; + END IF; + END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + END LOOP; +- RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- add FK constraint +- action := 'FK Constraints'; +- cnt := 0; +- SET search_path = ''; +- FOR qry IN +- SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' 
|| quote_ident(rn.relname) +- || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' ||quote_ident(source_schema), 'REFERENCES ' || quote_ident(dest_schema)) || ';' +- FROM pg_constraint ct +- JOIN pg_class rn ON rn.oid = ct.conrelid +- WHERE connamespace = src_oid +- AND rn.relkind = 'r' +- AND ct.contype = 'f' ++ ELSE ++ -- Handle 9.6 versions 90600 ++ FOR tblname, relpersist, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ -- Fixed Issue#108: double quote roles to avoid problems with special characters in OWNER TO statements ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', qry; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE qry; ++ bChild := True; + END IF; +- END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); +- +--- Create views +- action := 'Views'; +- cnt := 0; +- FOR object IN +- SELECT table_name::text, +- view_definition +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- SELECT view_definition INTO v_def +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- AND table_name = quote_ident(object); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=NA relkind=% bChild=%',tblname, relknd, bChild; END IF; + +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; + END IF; +- END LOOP; +- RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- Create Materialized views +- action := 'Mat. Views'; +- cnt := 0; +- FOR object IN +- SELECT matviewname::text, +- definition +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := dest_schema || '.' || quote_ident(object); +- SELECT replace(definition,';','') INTO v_def +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- AND matviewname = quote_ident(object); +- +- IF include_recs THEN +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- ELSE +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; + +- END IF; ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was that regression testing previously only worked on one partition table clone case ++ AND c2.relname = tblname AND ++ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', ++ c1.oid::pg_catalog.regclass::pg_catalog.text ++ LOOP ++ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; ++ -- issue#91, not sure if we need to do this for child tables ++ -- issue#95 we don't set ownership here ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ ELSE ++ EXECUTE qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ END IF; ++ END LOOP; ++ END IF; ++ ++ -- INCLUDING ALL creates new index names, we restore them to the old name. ++ -- There should be no conflicts since they live in different schemas ++ FOR ix_old_name, ix_new_name IN ++ SELECT old.indexname, new.indexname ++ FROM pg_indexes old, pg_indexes new ++ WHERE old.schemaname = source_schema ++ AND new.schemaname = dest_schema ++ AND old.tablename = new.tablename ++ AND old.tablename = tblname ++ AND old.indexname <> new.indexname ++ AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') ++ ORDER BY old.indexdef, new.indexdef ++ LOOP ++ lastsql = ''; ++ IF bDDLOnly THEN ++ RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ ELSE ++ -- The SELECT query above may return duplicate names when a column is ++ -- indexed twice in the same manner with 2 different names. Therefore, to ++ -- avoid a 'relation "xxx" already exists' we test if the index name ++ -- is in use or free. Skipping an existing index will fall back on unused ++ -- ones and every duplicate will be mapped to distinct old names. ++ IF NOT EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_old_name)) ++ AND EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_new_name)) ++ THEN ++ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ END IF; ++ END IF; ++ END LOOP; + ++ IF bData THEN ++ -- Insert records from source table ++ ++ -- 2021-03-03 MJV FIX ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ ++ -- Issue#86 fix: ++ -- IF data_type = 'USER-DEFINED' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=%', tblname, data_type, isGenerated; END IF; ++ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN ++ ++ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types.
You must copy them manually.', tblname; ++ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; ++ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ ++ -- Issue#79 implementation follows ++ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; ++ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); ++ -- Issue#101 fix: use text cast to get around the problem. ++ IF bFileCopy THEN ++ IF bWindows THEN ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ END IF; ++ ELSE ++ -- Issue#101: assume direct copy with text cast, add to separate array ++ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); ++ tblarray3 := tblarray3 || buffer3; ++ END IF; ++ ELSE ++ -- bypass child tables since we populate them when we populate the parents ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=NA relknd=% l_child=% bChild=%', tblname, relknd, l_child, bChild; END IF; ++ ++ IF NOT bChild THEN ++ -- Issue#75: Must defer population of tables until child tables have been added to parents ++ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. ++ -- buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(tblname) || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; ++ IF bFileCopy THEN ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ tblarray := tblarray || buffer2; ++ END IF; ++ END IF; ++ END IF; ++ END IF; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ FOR column_, default_ IN ++ SELECT column_name::text, ++ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') ++ FROM information_schema.COLUMNS ++ WHERE table_schema = source_schema ++ AND TABLE_NAME = tblname ++ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' ++ LOOP ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name ++ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; ++ IF bDDLOnly THEN ++ -- May need to come back and revisit this since previous sql will not return anything since no schema was created! ++ RAISE INFO '%', buffer2; ++ ELSE ++ EXECUTE buffer2; ++ END IF; + END LOOP; + ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ END LOOP; ++ END IF; ++ -- end of 90600 branch ++ ++ RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; ++ ++ -- Assigning sequences to table columns. ++ action := 'Sequences assigning'; + cnt := 0; +- FOR func_oid IN +- SELECT oid +- FROM pg_proc +- WHERE pronamespace = src_oid ++ FOR object IN ++ SELECT sequence_name::text ++ FROM information_schema.sequences ++ WHERE sequence_schema = quote_ident(source_schema) + LOOP + cnt := cnt + 1; +- SELECT pg_get_functiondef(func_oid) INTO qry; +- SELECT replace(qry, source_schema, dest_schema) INTO dest_qry; +- IF ddl_only THEN +- RAISE INFO '%', dest_qry; ++ srctbl := quote_ident(source_schema) || '.' || quote_ident(object); ++ ++ -- Get owning column, inspired from Sadique Ali post at: ++ -- https://sadique.io/blog/2019/05/07/viewing-sequence-ownership-information-in-postgres/ ++ -- Fixed via pull request#109 ++ SELECT ' OWNED BY ' ++ || quote_ident(dest_schema) ++ || '.' ++ || quote_ident(dc.relname) ++ || '.' ++ || quote_ident(a.attname) ++ INTO sq_owned ++ FROM pg_class AS c ++ JOIN pg_namespace n ON c.relnamespace = n.oid ++ JOIN pg_depend AS d ON c.relfilenode = d.objid ++ JOIN pg_class AS dc ON ( ++ d.refobjid = dc.relfilenode ++ AND dc.relnamespace = n.oid ++ ) ++ JOIN pg_attribute AS a ON ( ++ a.attnum = d.refobjsubid ++ AND a.attrelid = d.refobjid ++ ) ++ WHERE n.nspname = quote_ident(source_schema) ++ AND c.relkind = 'S' ++ AND c.relname = object; ++ ++ IF sq_owned IS NOT NULL THEN ++ qry := 'ALTER SEQUENCE ' ++ || quote_ident(dest_schema) ++ || '.'
++ || quote_ident(object) ++ || sq_owned ++ || ';'; ++ ++ IF bDDLOnly THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %', qry; END IF; ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' SEQUENCES set: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Update IDENTITY sequences to the last value, bypass 9.6 versions ++ IF sq_server_version_num > 90624 THEN ++ action := 'Identity updating'; ++ cnt := 0; ++ FOR object, sq_last_value IN ++ SELECT sequencename::text, COALESCE(last_value, -999) from pg_sequences where schemaname = quote_ident(source_schema) ++ AND NOT EXISTS ++ (select 1 from information_schema.sequences where sequence_schema = quote_ident(source_schema) and sequence_name = sequencename) ++ LOOP ++ IF sq_last_value = -999 THEN ++ continue; ++ END IF; ++ cnt := cnt + 1; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); ++ IF bData THEN ++ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; ++ ELSE ++ if bDDLOnly THEN ++ -- fix#63 ++ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; ++ ELSE ++ -- fix#63 ++ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; ++ END IF; ++ END IF; ++ END LOOP; ++ -- Fixed Issue#107: set lpad from 2 to 5 ++ RAISE NOTICE ' IDENTITIES set: %', LPAD(cnt::text, 5, ' '); ++ ELSE ++ -- Fixed Issue#107: set lpad from 2 to 5 ++ RAISE NOTICE ' IDENTITIES set: %', LPAD('-1'::text, 5, ' '); ++ END IF; ++ ++ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys ++ -- add FK constraint ++ -- action := 'FK Constraints'; ++ ++ -- Issue#62: Add comments on indexes, and then removed them from here and reworked later below. ++ ++ -- Issue 90: moved functions to here, before views or MVs that might use them ++ -- Create functions ++ action := 'Functions'; ++ cnt := 0; ++ -- MJV FIX per issue# 34 ++ -- SET search_path = ''; ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ ++ -- Fixed Issue#65 ++ -- Fixed Issue#97 ++ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind != 'a' ++ IF is_prokind THEN ++ FOR func_oid, func_owner, func_name, func_args, func_argno, buffer3 IN ++ SELECT p.oid, pg_catalog.pg_get_userbyid(p.proowner), p.proname, oidvectortypes(p.proargtypes), p.pronargs, ++ CASE WHEN prokind = 'p' THEN 'PROCEDURE' WHEN prokind = 'f' THEN 'FUNCTION' ELSE '' END ++ FROM pg_proc p WHERE p.pronamespace = src_oid AND p.prokind != 'a' ++ LOOP ++ cnt := cnt + 1; ++ SELECT pg_get_functiondef(func_oid) ++ INTO qry; ++ ++ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ IF func_argno = 0 THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER % %() OWNER TO %', buffer3, quote_ident(dest_schema) || '.' || quote_ident(func_name), '"' || func_owner || '";'; ++ ELSE ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER % % OWNER TO %', buffer3, quote_ident(dest_schema) || '.'
|| quote_ident(func_name) || '(' || func_args || ')', '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %', dest_qry; END IF; ++ EXECUTE dest_qry; ++ ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ IF func_argno = 0 THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '() OWNER TO ' || '"' || func_owner || '";'; ++ ELSE ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '(' || func_args || ') OWNER TO ' || '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ ELSE ++ FOR func_oid IN SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND not proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT pg_get_functiondef(func_oid) INTO qry; ++ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ END IF; ++ ++ -- Create aggregate functions. ++ -- Fixed Issue#65 ++ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind = 'a' ++ IF is_prokind THEN ++ FOR func_oid IN ++ SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND prokind = 'a' ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' ++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ + ELSE +- EXECUTE dest_qry; ++ FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' 
++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); + END IF; + ++ -- Create views ++ action := 'Views'; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- MJV FIX #43: also had to reset search_path from source schema to empty. ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) ++ INTO v_dummy; ++ ++ cnt := 0; ++ --FOR object IN ++ -- SELECT table_name::text, view_definition ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ ++ -- Issue#73 replace loop query to handle dependencies ++ -- Issue#91 get view_owner ++ FOR srctbl, aname, view_owner, object IN ++ WITH RECURSIVE views AS ( ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm' AS is_materialized, pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ 1 AS level ++ FROM pg_depend AS d ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ UNION ++ -- add the views that depend on these ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm', pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ views.level + 1 ++ FROM views ++ JOIN pg_depend AS d ++ ON d.refobjid = views.viewname ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ AND v.oid <> views.viewname ++ ) ++ SELECT tablename, viewname, owner, format('CREATE OR REPLACE%s VIEW %s AS%s', ++ CASE WHEN is_materialized ++ THEN ' MATERIALIZED' ++ ELSE '' ++ END, ++ viewname, ++ pg_get_viewdef(viewname)) ++ FROM views ++ WHERE schemaname = quote_ident(source_schema) ++ GROUP BY schemaname, tablename, viewname, owner, is_materialized ++ ORDER BY max(level), schemaname, tablename ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#73 replace logic based on new loop sql ++ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(aname); ++ -- MJV FIX: #43 ++ -- SELECT view_definition INTO v_def ++ -- SELECT REPLACE(view_definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ -- AND table_name = quote_ident(object); ++ SELECT REPLACE(object, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def; ++ -- NOTE: definition already includes the closing statement semicolon ++ SELECT REPLACE(aname, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer3; ++ IF bDDLOnly THEN ++ RAISE INFO '%', v_def; ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ -- RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, view_owner || ';'; ++ RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, '"' ||view_owner || '";'; ++ END IF; ++ ELSE ++ -- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def; ++ EXECUTE v_def; ++ -- Issue#73: commented out comment logic for views since we do it elsewhere now. ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ v_def = 'ALTER TABLE ' || buffer3 || ' OWNER TO ' || '"' || view_owner || '";'; ++ EXECUTE v_def; ++ END IF; ++ END IF; + END LOOP; +- RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Create Materialized views ++ action := 'Mat. Views'; ++ cnt := 0; ++ -- Issue#91 get view_owner ++ FOR object, view_owner, v_def IN ++ SELECT matviewname::text, '"' || matviewowner::text || '"', replace(definition,';','') FROM pg_catalog.pg_matviews WHERE schemaname = quote_ident(source_schema) ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on target schema and object ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); ++ ++ -- MJV FIX: #72 remove source schema in MV def ++ SELECT REPLACE(v_def, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer2; ++ ++ IF bData THEN ++ -- issue#98 defer creation until after regular tables are populated. Also defer the ownership as well. 
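++          -- NOTE (Issue#98): when data copy is requested, nothing is executed at this
++          -- point; the CREATE/ALTER/COMMENT statements are only appended to mvarray and
++          -- are replayed in the 'Copy Rows' phase further down, once the regular tables
++          -- have been populated.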
++ -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;' ; ++ buffer3 = 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;'; ++ mvarray := mvarray || buffer3; ++ ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ -- EXECUTE buffer3; ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ mvarray := mvarray || buffer3; ++ END IF; ++ ELSE ++ IF bDDLOnly THEN ++ RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; ++ -- Issue#91 ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO '%', 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ END IF; ++ ELSE ++ EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; ++ -- Issue#91 ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ EXECUTE buffer3; ++ END IF; ++ END IF; ++ END IF; ++ SELECT coalesce(obj_description(oid), '') into adef from pg_class where relkind = 'm' and relname = object; ++ IF adef <> '' THEN ++ IF bDDLOnly THEN ++ RAISE INFO '%', 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ ELSE ++ -- Issue#$98: also defer if copy rows is on since we defer MVIEWS in that case ++ IF bData THEN ++ buffer3 = 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ mvarray = mvarray || buffer3; ++ ELSE ++ EXECUTE 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ END IF; ++ ++ END IF; ++ END IF; ++ ++ FOR aname, adef IN ++ SELECT indexname, replace(indexdef, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') as newdef FROM pg_indexes where schemaname = quote_ident(source_schema) and tablename = object order by indexname ++ LOOP ++ IF bDDLOnly THEN ++ RAISE INFO '%', adef || ';'; ++ ELSE ++ EXECUTE adef || ';'; ++ END IF; ++ END LOOP; ++ ++ END LOOP; ++ RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Issue 90 Move create functions to before views + + -- MV: Create Triggers ++ ++ -- MJV FIX: #38 ++ -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ + action := 'Triggers'; + cnt := 0; + FOR arec IN +- SELECT trigger_schema, trigger_name, event_object_table, action_order, action_condition, action_statement, action_orientation, action_timing, array_to_string(array_agg(event_manipulation::text), ' OR '), +- 'CREATE TRIGGER ' || trigger_name || ' ' || action_timing || ' ' || array_to_string(array_agg(event_manipulation::text), ' OR ') || ' ON ' || quote_ident(dest_schema) || '.' 
|| event_object_table || +- ' FOR EACH ' || action_orientation || ' ' || action_statement || ';' as TRIG_DDL +- FROM information_schema.triggers where trigger_schema = quote_ident(source_schema) GROUP BY 1,2,3,4,5,6,7,8 ++ -- 2021-03-09 MJV FIX: #40 fixed sql to get the def using pg_get_triggerdef() sql ++ SELECT n.nspname, c.relname, t.tgname, p.proname, REPLACE(pg_get_triggerdef(t.oid), quote_ident(source_schema), quote_ident(dest_schema)) || ';' AS trig_ddl ++ FROM pg_trigger t, pg_class c, pg_namespace n, pg_proc p ++ WHERE n.nspname = quote_ident(source_schema) ++ AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') ++ AND n.oid = p.pronamespace ++ AND c.oid = t.tgrelid ++ AND p.oid = t.tgfoid ++ ORDER BY c.relname, t.tgname + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.trig_ddl; + ELSE + EXECUTE arec.trig_ddl; +@@ -474,55 +2444,383 @@ + END LOOP; + RAISE NOTICE ' TRIGGERS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- --------------------- +- -- MV: Permissions: Defaults +- -- --------------------- +- action := 'PRIVS: Defaults'; ++ ++ -- MV: Create Rules ++ -- Fixes Issue#59 Implement Rules ++ action := 'Rules'; + cnt := 0; + FOR arec IN +- SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, +- CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, +- d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr +- FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) WHERE n.nspname IS NOT NULL and n.nspname = quote_ident(source_schema) ORDER BY 3, 2, 1 ++ SELECT regexp_replace(definition, E'[\\n\\r]+', ' ', 'g' ) as definition ++ FROM pg_rules ++ WHERE schemaname = quote_ident(source_schema) + LOOP +- BEGIN +- -- RAISE NOTICE 'owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ cnt := cnt + 1; ++ buffer := REPLACE(arec.definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' RULES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MV: Create Policies ++ -- Fixes Issue#66 Implement Security policies for RLS ++ action := 'Policies'; ++ cnt := 0; ++ -- #106 Handle 9.6 which doesn't have "permissive" ++ IF sq_server_version_num > 90624 THEN ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tablename) || ' AS ' || permissive || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ ELSE ++ -- handle 9.6 versions ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(tablename) || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' POLICIES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MJV Fixed #62 for comments (PASS 1) ++ action := 'Comments1'; ++ cnt := 0; ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. Also, do not include comments on foreign tables since we do not clone foreign tables at this time. ++ SELECT 'COMMENT ON ' || CASE WHEN c.relkind in ('r','p') AND a.attname IS NULL THEN 'TABLE ' WHEN c.relkind in ('r','p') AND ++ a.attname IS NOT NULL THEN 'COLUMN ' WHEN c.relkind = 'f' THEN 'FOREIGN TABLE ' WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW ' WHEN c.relkind = 'v' THEN 'VIEW ' ++ WHEN c.relkind = 'i' THEN 'INDEX ' WHEN c.relkind = 'S' THEN 'SEQUENCE ' ELSE 'XX' END || quote_ident(dest_schema) || '.' || CASE WHEN c.relkind in ('r','p') AND ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ a.attname IS NOT NULL THEN quote_ident(c.relname) || '.' 
|| a.attname ELSE quote_ident(c.relname) END || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace) ++ LEFT JOIN pg_description d ON (c.oid = d.objoid) ++ LEFT JOIN pg_attribute a ON (c.oid = a.attrelid ++ AND a.attnum > 0 and a.attnum = d.objsubid) ++ WHERE c.relkind <> 'f' AND d.description IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY ddl ++ LOOP ++ cnt := cnt + 1; ++ ++ -- BAD : "COMMENT ON SEQUENCE sample_clone2.CaseSensitive_ID_seq IS 'just a comment on CaseSensitive sequence';" ++ -- GOOD: "COMMENT ON SEQUENCE "CaseSensitive_ID_seq" IS 'just a comment on CaseSensitive sequence';" ++ ++ -- Issue#98 For MVs we create comments when we create the MVs ++ IF substring(qry,1,28) = 'COMMENT ON MATERIALIZED VIEW' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: deferring comments on MVs'; END IF; ++ cnt = cnt - 1; ++ continue; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' COMMENTS(1) cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- MJV Fixed #62 for comments (PASS 2) ++ action := 'Comments2'; ++ cnt2 := 0; ++ IF is_prokind THEN ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '.' || quote_ident(c.collname) || ' IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN p.prokind = 'f' THEN 'FUNCTION ' WHEN p.prokind = 'p' THEN 'PROCEDURE ' WHEN p.prokind = 'a' THEN 'AGGREGATE ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ ELSE -- must be v 10 or less ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- FIX Issue#87 by adding double quotes around collation name ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN proisagg THEN 'AGGREGATE ' ELSE 'FUNCTION ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' COMMENTS(2) cloned: %', LPAD(cnt2::text, 5, ' '); + +- FOREACH aclstr IN ARRAY arec.defaclacl +- LOOP +- cnt := cnt + 1; +- -- RAISE NOTICE 'aclstr=%', aclstr; +- -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner +- SELECT split_part(aclstr, '=',1) INTO grantee; +- SELECT split_part(aclstr, '=',2) INTO grantor; +- SELECT split_part(grantor, '/',1) INTO privs; +- SELECT split_part(grantor, '/',2) INTO grantor; +- -- RAISE NOTICE 'grantor=% grantee=% privs=%', grantor, grantee, privs; +- +- IF arec.atype = 'function' THEN +- -- Just having execute is enough to grant all apparently. +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; + +- ELSIF arec.atype = 'sequence' THEN +- IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN +- -- arU is enough for all privs +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN ++ -- Issue#95 bypass if No ACL specified. 
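++  -- Each pg_default_acl entry is unpacked from its aclitem form, grantee=privs/grantor,
++  -- and re-issued against the destination schema. For example (illustrative names), the
++  -- aclitem 'readonly=rU/app_owner' on sequences becomes:
++  --   ALTER DEFAULT PRIVILEGES FOR ROLE app_owner IN SCHEMA <dest_schema>
++  --     GRANT SELECT, USAGE ON SEQUENCES TO "readonly";
++  -- Per Issue#92, when grantor = grantee the statement must run as that role, so it is
++  -- prefixed with SET ROLE and the caller's role is restored right after.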
++ IF NOT bNoACL THEN ++ -- --------------------- ++ -- MV: Permissions: Defaults ++ -- --------------------- ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ action := 'PRIVS: Defaults'; ++ cnt := 0; ++ FOR arec IN ++ SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, ++ CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, ++ d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr ++ FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) ++ WHERE n.nspname IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY 3, 2, 1 ++ LOOP ++ BEGIN ++ -- RAISE NOTICE ' owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ ++ FOREACH aclstr IN ARRAY arec.defaclacl ++ LOOP ++ cnt := cnt + 1; ++ -- RAISE NOTICE ' aclstr=%', aclstr; ++ -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner ++ SELECT split_part(aclstr, '=',1) INTO grantee; ++ SELECT split_part(aclstr, '=',2) INTO grantor; ++ SELECT split_part(grantor, '/',1) INTO privs; ++ SELECT split_part(grantor, '/',2) INTO grantor; ++ -- RAISE NOTICE ' grantor=% grantee=% privs=%', grantor, grantee, privs; ++ ++ IF arec.atype = 'function' THEN ++ -- Just having execute is enough to grant all apparently. ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ -- set role = cm_stage_ro_grp; ++ -- ALTER DEFAULT PRIVILEGES FOR ROLE cm_stage_ro_grp IN SCHEMA cm_stage GRANT REFERENCES, TRIGGER ON TABLES TO cm_stage_ro_grp; ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'sequence' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; + +- ELSE +- -- have to specify each priv individually ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSE ++ -- have to specify each priv individually ++ buffer2 := ''; ++ IF POSITION('r' IN privs) > 0 THEN ++ buffer2 := 'SELECT'; ++ END IF; ++ IF POSITION('w' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'UPDATE'; ++ ELSE ++ buffer2 := buffer2 || ', UPDATE'; ++ END IF; ++ END IF; ++ IF POSITION('U' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'USAGE'; ++ ELSE ++ buffer2 := buffer2 || ', USAGE'; ++ END IF; ++ END IF; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF 
bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ END IF; ++ ++ ELSIF arec.atype = 'table' THEN ++ -- do each priv individually, jeeeesh! + buffer2 := ''; ++ IF POSITION('a' IN privs) > 0 THEN ++ buffer2 := 'INSERT'; ++ END IF; + IF POSITION('r' IN privs) > 0 THEN +- buffer2 := 'SELECT'; ++ IF buffer2 = '' THEN ++ buffer2 := 'SELECT'; ++ ELSE ++ buffer2 := buffer2 || ', SELECT'; ++ END IF; + END IF; + IF POSITION('w' IN privs) > 0 THEN + IF buffer2 = '' THEN +@@ -531,181 +2829,431 @@ + buffer2 := buffer2 || ', UPDATE'; + END IF; + END IF; +- IF POSITION('U' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'USAGE'; ++ IF POSITION('d' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'DELETE'; + ELSE +- buffer2 := buffer2 || ', USAGE'; ++ buffer2 := buffer2 || ', DELETE'; + END IF; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- +- END IF; +- ELSIF arec.atype = 'table' THEN +- -- do each priv individually, jeeeesh! +- buffer2 := ''; +- IF POSITION('a' IN privs) > 0 THEN +- buffer2 := 'INSERT'; +- END IF; +- IF POSITION('r' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'SELECT'; +- ELSE +- buffer2 := buffer2 || ', SELECT'; ++ IF POSITION('t' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRIGGER'; ++ ELSE ++ buffer2 := buffer2 || ', TRIGGER'; ++ END IF; + END IF; +- END IF; +- IF POSITION('w' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'UPDATE'; +- ELSE +- buffer2 := buffer2 || ', UPDATE'; ++ IF POSITION('T' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRUNCATE'; ++ ELSE ++ buffer2 := buffer2 || ', TRUNCATE'; ++ END IF; + END IF; +- END IF; +- IF POSITION('d' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'DELETE'; +- ELSE +- buffer2 := buffer2 || ', DELETE'; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; + END IF; +- END IF; +- IF POSITION('t' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRIGGER'; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; + ELSE +- buffer2 := buffer2 || ', TRIGGER'; ++ EXECUTE buffer; + END IF; +- END IF; +- IF POSITION('T' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRUNCATE'; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'type' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF POSITION('U' IN privs) THEN ++ buffer := 'ALTER DEFAULT 
PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT USAGE ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ + ELSE +- buffer2 := buffer2 || ', TRUNCATE'; +- END IF; ++ RAISE WARNING 'Unhandled TYPE Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- + ELSE +- RAISE WARNING 'Doing nothing for type=% privs=%', arec.atype, privs; ++ RAISE WARNING 'Unhandled Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- END LOOP; +- END; +- END LOOP; ++ END LOOP; ++ END; ++ END LOOP; + +- RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: schema +- -- crunchy data extension, check_access +- -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() +- -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: schema ++ -- crunchy data extension, check_access ++ -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() ++ -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; + +- action := 'PRIVS: Schema'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl +- FROM pg_catalog.pg_namespace AS n CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) +- WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) order by r.rolname, p.perm::perm_type +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.schema_ddl; +- ELSE +- EXECUTE arec.schema_ddl; +- END IF; ++ action := 'PRIVS: Schema'; ++ cnt := 0; ++ FOR arec IN ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl ++ FROM pg_catalog.pg_namespace AS n ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) ++ WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) ++ ORDER BY r.rolname, p.perm::perm_type ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.schema_ddl; ++ ELSE ++ EXECUTE arec.schema_ddl; ++ END IF; + +- END; +- END LOOP; +- RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END; ++ END LOOP; ++ RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, 
' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: sequences +- action := 'PRIVS: Sequences'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as seq_ddl +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only OR seq_cnt = 0 THEN +- RAISE INFO '%', arec.seq_ddl; +- ELSE +- EXECUTE arec.seq_ddl; +- END IF; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: sequences ++ action := 'PRIVS: Sequences'; ++ cnt := 0; ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(t.relname::text) || ' TO "' || r.rolname || '";' as seq_ddl ++ FROM pg_catalog.pg_class AS t ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) ++ WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.seq_ddl; END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.seq_ddl; ++ ELSE ++ EXECUTE arec.seq_ddl; ++ END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- END; +- END LOOP; +- RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: functions ++ action := 'PRIVS: Functions/Procedures'; ++ cnt := 0; + +- -- MV: PRIVS: functions +- action := 'PRIVS: Functions'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' || regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') || ' TO "' || r.rolname || '";' as func_ddl +- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') +- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.func_ddl; +- ELSE +- EXECUTE arec.func_ddl; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ -- RAISE NOTICE ' source_schema=% dest_schema=%',source_schema, dest_schema; ++ FOR arec IN ++ -- 2021-03-05 MJV FIX: issue#35: caused exception in some functions with parameters and gave privileges to other users that should not have gotten them. ++ -- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' 
|| replace(regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', ''), source_schema, dest_schema) || ' TO "' || r.rolname || '";' as func_ddl ++ -- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') ++ -- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') ++ ++ -- 2021-03-05 MJV FIX: issue#37: defaults cause problems, use system function that returns args WITHOUT DEFAULTS ++ -- COALESCE(r.routine_type, 'FUNCTION'): for aggregate functions, information_schema.routines contains NULL as routine_type value. ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on rp.routine_name ++ SELECT 'GRANT ' || rp.privilege_type || ' ON ' || COALESCE(r.routine_type, 'FUNCTION') || ' ' || quote_ident(dest_schema) || '.' || quote_ident(rp.routine_name) || ' (' || pg_get_function_identity_arguments(p.oid) || ') TO ' || string_agg(distinct rp.grantee, ',') || ';' as func_dcl ++ FROM information_schema.routine_privileges rp, information_schema.routines r, pg_proc p, pg_namespace n ++ WHERE rp.routine_schema = quote_ident(source_schema) ++ AND rp.is_grantable = 'YES' ++ AND rp.routine_schema = r.routine_schema ++ AND rp.routine_name = r.routine_name ++ AND rp.routine_schema = n.nspname ++ AND n.oid = p.pronamespace ++ AND p.proname = r.routine_name ++ GROUP BY rp.privilege_type, r.routine_type, rp.routine_name, pg_get_function_identity_arguments(p.oid) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.func_dcl; ++ ELSE ++ EXECUTE arec.func_dcl; ++ END IF; ++ END; ++ END LOOP; ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: tables ++ action := 'PRIVS: Tables'; ++ -- regular, partitioned, and foreign tables plus view and materialized view permissions. Ignored for now: implement foreign table defs. ++ cnt := 0; ++ FOR arec IN ++ -- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, ++ -- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind ++ -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) ++ -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind ++ -- 2021-03-05 MJV FIX: Fixed Issue#36 for tables ++ SELECT c.relkind, 'GRANT ' || tb.privilege_type || CASE WHEN c.relkind in ('r', 'p') THEN ' ON TABLE ' WHEN c.relkind in ('v', 'm') THEN ' ON ' END || ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ -- Issue#108 FIX: enclose double-quote grantees with special characters ++ -- quote_ident(dest_schema) || '.' || quote_ident(tb.table_name) || ' TO ' || string_agg(tb.grantee, ',') || ';' as tbl_dcl ++ quote_ident(dest_schema) || '.' 
|| quote_ident(tb.table_name) || ' TO ' || string_agg('"' || tb.grantee || '"', ',') || ';' as tbl_dcl ++ FROM information_schema.table_privileges tb, pg_class c, pg_namespace n ++ WHERE tb.table_schema = quote_ident(source_schema) AND tb.table_name = c.relname AND c.relkind in ('r', 'p', 'v', 'm') ++ AND c.relnamespace = n.oid AND n.nspname = quote_ident(source_schema) ++ GROUP BY c.relkind, tb.privilege_type, tb.table_schema, tb.table_name ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.tbl_dcl; END IF; ++ -- Issue#46. Fixed reference to invalid record name (tbl_ddl --> tbl_dcl). ++ IF arec.relkind = 'f' THEN ++ RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_dcl; ++ ELSE ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.tbl_dcl; ++ ELSE ++ EXECUTE arec.tbl_dcl; ++ END IF; + END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- LOOP for regular tables and populate them if specified ++ -- Issue#75 moved from big table loop above to here. ++ IF bData THEN ++ r = clock_timestamp(); ++ -- IF bVerbose THEN RAISE NOTICE 'START: copy rows %',clock_timestamp() - t; END IF; ++ IF bVerbose THEN RAISE NOTICE 'Copying rows...'; END IF; ++ ++ EXECUTE 'SET search_path = ' || quote_ident(dest_schema) ; ++ action := 'Copy Rows'; ++ FOREACH tblelement IN ARRAY tblarray ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG1: no UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 13); ++ SELECT POSITION(' OVERRIDING SYSTEM VALUE SELECT ' IN buffer) INTO cnt2; ++ IF cnt2 = 0 THEN ++ SELECT POSITION(' SELECT ' IN buffer) INTO cnt2; ++ buffer = substring(buffer,1, cnt2); ++ ELSE ++ buffer = substring(buffer,1, cnt2); ++ END IF; ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- END; +- END LOOP; +- RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#79 implementation ++ -- Do same for tables with user-defined elements using copy to file method ++ FOREACH tblelement IN ARRAY tblarray2 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG2: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ ++ -- STATEMENT LOOKS LIKE THIS: ++ -- INSERT INTO sample11.warehouses SELECT * FROM sample.warehouses; ++ -- INSERT INTO sample11.person OVERRIDING SYSTEM VALUE SELECT * FROM sample.person; ++ -- COPY sample.address TO '/tmp/cloneschema.tmp' WITH DELIMITER AS ',';\ ++ buffer = TRIM(tblelement::text); ++ -- RAISE NOTICE 'element=%', buffer; ++ cnt1 = POSITION('INSERT INTO' IN buffer); ++ cnt2 = POSITION('COPY ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 12); ++ ELSIF cnt2 > 0 THEN ++ buffer = substring(buffer, 5); ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ ++ -- RAISE NOTICE 'buffer1=%', buffer; ++ cnt1 = POSITION(' OVERRIDING ' IN buffer); ++ cnt2 = POSITION('SELECT * FROM ' IN buffer); ++ cnt3 = POSITION(' FROM ' IN buffer); ++ cnt4 = POSITION(' TO ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 1, cnt1-2); ++ ELSIF cnt2 > 0 THEN ++ 
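++              -- plain INSERT INTO ... SELECT * FROM: keep just the target table name
++              -- for the 'Populated cloned table' progress notice below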
buffer = substring(buffer, 1, cnt2-2); ++ ELSIF cnt3 > 0 THEN ++ buffer = substring(buffer, 1, cnt3-1); ++ ELSIF cnt4 > 0 THEN ++ -- skip the COPY TO statements ++ continue; ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ -- RAISE NOTICE 'buffer2=%', buffer; ++ ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ -- RAISE NOTICE 'buffer3=%', buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- -- MV: PRIVS: tables +- action := 'PRIVS: Tables'; +- -- regular, partitioned, and foreign tables plus view and materialized view permissions. TODO: implement foreign table defs. ++ -- Issue#101 ++ -- Do same for tables with user-defined elements using direct method with text cast ++ FOREACH tblelement IN ARRAY tblarray3 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG3: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ cnt2 = POSITION(' (' IN tblelement::text); ++ IF cnt2 > 0 THEN ++ buffer = substring(tblelement, 1, cnt2); ++ buffer = substring(buffer, 6); ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END IF; ++ END LOOP; ++ ++ -- Issue#98 MVs deferred until now ++ FOREACH tblelement IN ARRAY mvarray ++ LOOP ++ s = clock_timestamp(); ++ EXECUTE tblelement; ++ -- get diagnostics for MV creates or refreshes does not work, always returns 1 ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 25); ++ cnt2 = POSITION(' AS ' IN buffer); ++ IF cnt2 > 0 THEN ++ buffer = substring(buffer, 1, cnt2); ++ SELECT RPAD(buffer, 36, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated Mat. View, % Rows Inserted: ? seconds: %', buffer, LPAD(cnt2::text, 5, ' '); END IF; ++ mvscopied := mvscopied + 1; ++ END IF; ++ END LOOP; ++ ++ cnt := cast(extract(epoch from (clock_timestamp() - r)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Copy rows duration: % seconds',cnt; END IF; ++ END IF; ++ RAISE NOTICE ' TABLES copied: %', LPAD(tblscopied::text, 5, ' '); ++ RAISE NOTICE ' MATVIEWS refreshed: %', LPAD(mvscopied::text, 5, ' '); ++ ++ ++ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys ++ -- add FK constraint ++ action := 'FK Constraints'; + cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' 
|| t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, +- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind +- LOOP +- BEGIN +- cnt := cnt + 1; +- -- RAISE NOTICE 'ddl=%', arec.tbl_ddl; +- IF arec.relkind = 'f' THEN +- RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_ddl; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', arec.tbl_ddl; +- ELSE +- EXECUTE arec.tbl_ddl; +- END IF; + +- END IF; +- END; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ FOR qry IN ++ SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(rn.relname) ++ || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' || quote_ident(source_schema) || '.', 'REFERENCES ' ++ || quote_ident(dest_schema) || '.') || ';' ++ FROM pg_constraint ct ++ JOIN pg_class rn ON rn.oid = ct.conrelid ++ -- Issue#103 needed to add this left join ++ LEFT JOIN pg_inherits i ON (rn.oid = i.inhrelid) ++ WHERE connamespace = src_oid ++ AND rn.relkind = 'r' ++ AND ct.contype = 'f' ++ -- Issue#103 fix: needed to also add this null check ++ AND i.inhrelid is null ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: adding FKEY constraint: %', qry; END IF; ++ EXECUTE qry; ++ END IF; + END LOOP; +- RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ ++ IF src_path_old = '' OR src_path_old = '""' THEN ++ -- RAISE NOTICE 'Restoring old search_path to empty string'; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ -- RAISE NOTICE 'Restoring old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path back to what it was: %', v_dummy; END IF; ++ cnt := cast(extract(epoch from (clock_timestamp() - t)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'clone_schema duration: % seconds',cnt; END IF; + + EXCEPTION + WHEN others THEN + BEGIN + GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT; +- -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1 || ' .' || v_diag2 || ' .' || v_diag3; +- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1; +- RAISE EXCEPTION 'Action: % Diagnostics: %',action, v_ret; +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. 
' || v_diag1; ++ -- Issue#101: added version to exception output ++ -- RAISE NOTICE 'v_diag1=% v_diag2=% v_diag3=% v_diag4=% v_diag5=% v_diag6=%', v_diag1, v_diag2, v_diag3, v_diag4, v_diag5, v_diag6; ++ buffer2 = ''; ++ IF action = 'Copy Rows' AND v_diag4 = '42704' THEN ++ -- Issue#105 Help user to fix the problem. ++ buffer2 = 'It appears you have a USER-DEFINED column type mismatch. Try running clone_schema with the FILECOPY option. '; ++ END IF; ++ IF lastsql <> '' THEN ++ buffer = v_ret || E'\n'|| buffer2 || E'\n'|| lastsql; ++ ELSE ++ buffer = v_ret || E'\n'|| buffer2; ++ END IF; ++ RAISE EXCEPTION 'Version: % Action: % Diagnostics: %',v_version, action, buffer; ++ ++ IF src_path_old = '' THEN ++ -- RAISE NOTICE 'setting old search_path to empty string'; ++ SELECT set_config('search_path', '', false); ++ ELSE ++ -- RAISE NOTICE 'setting old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ + RETURN; + END; + +@@ -713,14 +3261,14 @@ + END; + + $BODY$ +- LANGUAGE plpgsql VOLATILE +- COST 100; +-ALTER FUNCTION public.clone_schema(text, text, boolean, boolean) OWNER TO "{db_user}"; +-""" ++ LANGUAGE plpgsql VOLATILE COST 100; + ++ALTER FUNCTION public.clone_schema(text, text, cloneparms[]) OWNER TO "{db_user}"; ++-- REVOKE ALL PRIVILEGES ON FUNCTION clone_schema(text, text, cloneparms[]) FROM public; ++""" # noqa + +-class CloneSchema: + ++class CloneSchema: + def _create_clone_schema_function(self): + """ + Creates a postgres function `clone_schema` that copies a schema and its +@@ -752,9 +3300,8 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, true, false)' ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" + cursor.execute( +- sql, +- {'base_schema': base_schema_name, 'new_schema': new_schema_name} ++ sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} + ) + cursor.close() + +From c49b4a1c254ebe713259515a4c8373a9b19dd000 Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:06 +0100 +Subject: [PATCH 2/3] clone: allow setting up the clone mode (DATA, NODATA) + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 13 ++++++++++--- + django_tenants/models.py | 11 ++++++++++- + 2 files changed, 20 insertions(+), 4 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 3afce109..6fa52c04 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -3281,7 +3281,9 @@ def _create_clone_schema_function(self): + cursor.execute(CLONE_SCHEMA_FUNCTION.format(db_user=db_user)) + cursor.close() + +- def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): ++ def clone_schema( ++ self, base_schema_name, new_schema_name, clone_mode="DATA", set_connection=True ++ ): + """ + Creates a new schema `new_schema_name` as a clone of an existing schema + `old_schema_name`. 
+@@ -3300,8 +3302,13 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, %(clone_mode)s)" + cursor.execute( +- sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} ++ sql, ++ { ++ "base_schema": base_schema_name, ++ "new_schema": new_schema_name, ++ "clone_mode": clone_mode, ++ }, + ) + cursor.close() +diff --git a/django_tenants/models.py b/django_tenants/models.py +index 0d1812d8..655e1994 100644 +--- a/django_tenants/models.py ++++ b/django_tenants/models.py +@@ -29,6 +29,13 @@ class TenantMixin(models.Model): + to be automatically created upon save. + """ + ++ clone_mode = "DATA" ++ """ ++ One of "DATA", "NODATA". ++ When using TENANT_BASE_SCHEMA, controls whether only the database ++ structure will be copied, or if data will be copied along with it. ++ """ ++ + schema_name = models.CharField(max_length=63, unique=True, db_index=True, + validators=[_check_schema_name]) + +@@ -184,7 +191,9 @@ def create_schema(self, check_if_exists=False, sync_schema=True, + # copy tables and data from provided model schema + base_schema = get_tenant_base_schema() + clone_schema = CloneSchema() +- clone_schema.clone_schema(base_schema, self.schema_name) ++ clone_schema.clone_schema( ++ base_schema, self.schema_name, self.clone_mode ++ ) + + call_command('migrate_schemas', + tenant=True, + +From 218fbcd3bfa555b20c6fb904e5fcf307d69f18af Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:54 +0100 +Subject: [PATCH 3/3] clone: always (re-)create the clone_schema function + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 10 +++------- + 1 file changed, 3 insertions(+), 7 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 6fa52c04..63fb8e22 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -1,7 +1,6 @@ + from django.conf import settings + from django.core.exceptions import ValidationError + from django.db import connection, transaction +-from django.db.utils import ProgrammingError + + from django_tenants.utils import schema_exists + +@@ -3292,12 +3291,9 @@ def clone_schema( + connection.set_schema_to_public() + cursor = connection.cursor() + +- # check if the clone_schema function already exists in the db +- try: +- cursor.execute("SELECT 'clone_schema'::regproc") +- except ProgrammingError: +- self._create_clone_schema_function() +- transaction.commit() ++ # create or update the clone_schema function in the db ++ self._create_clone_schema_function() ++ transaction.commit() + + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") diff --git a/ilot/py3-django-tenants/APKBUILD b/ilot/py3-django-tenants/APKBUILD new file mode 100644 index 0000000..f12eac2 --- /dev/null +++ b/ilot/py3-django-tenants/APKBUILD @@ -0,0 +1,43 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-django-tenants +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=django-tenants +pkgver=3.6.1 +pkgrel=0 +pkgdesc="Tenant support for Django using PostgreSQL schemas." 
+url="https://pypi.python.org/project/django-tenants" +arch="noarch" +license="KIT" +depends="py3-django py3-psycopg py3-gunicorn py3-coverage" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/django-tenants/django-tenants/tar.gz/refs/tags/v$pkgver + 997_update-from-pgclone-schema.patch + " +builddir="$srcdir/$_pkgreal-$pkgver" +options="!check" # Requires setting up test database +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +b18afce81ccc89e49fcc4ebe85d90be602415ca898c1660a4e71e2bef6a3ed2e8c724e94b61d8c6f48f3fb19eb2a87d6a6f5bbf449b3e2f661f87e4b5638eafb py3-django-tenants-3.6.1.tar.gz +f2424bb188db2e3c7d13c15e5bdf0959c6f794e68dbc677c8b876d4faa321f78aded5565539f1bfd97583c6df0fcc19ec05abe203b08407e4446dd7194756825 997_update-from-pgclone-schema.patch +" From 2babe46d95a35f007326f40259d2de22df425563 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:42 -0400 Subject: [PATCH 23/38] ilot/py3-scim2-filter-parser: new aport --- ilot/py3-scim2-filter-parser/APKBUILD | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 ilot/py3-scim2-filter-parser/APKBUILD diff --git a/ilot/py3-scim2-filter-parser/APKBUILD b/ilot/py3-scim2-filter-parser/APKBUILD new file mode 100644 index 0000000..784a660 --- /dev/null +++ b/ilot/py3-scim2-filter-parser/APKBUILD @@ -0,0 +1,38 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-scim2-filter-parser +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=scim2-filter-parser +pkgver=0.5.0 +pkgrel=0 +pkgdesc="A customizable parser/transpiler for SCIM2.0 filters" +url="https://pypi.python.org/project/scim2-filter-parser" +arch="noarch" +license="MIT" +depends="py3-django py3-sly" +checkdepends="py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel poetry" +source="$pkgname-$pkgver.tar.gz::https://github.com/15five/scim2-filter-parser/archive/refs/tags/$pkgver.tar.gz" +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +5347852af6b82a764a32bc491a7e0f05f06b4f4d93dfa375668b5ca1a15ee58f488702536e350100fe5c96a5c94c492ea8cbd0e1952c5920d5a10e1453357f8c py3-scim2-filter-parser-0.5.0.tar.gz +" From 9410c4943e93843578530746fa38638d53db42bb Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:45 -0400 Subject: [PATCH 24/38] ilot/py3-tenant-schemas-celery: new aport --- ilot/py3-tenant-schemas-celery/APKBUILD | 41 +++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 ilot/py3-tenant-schemas-celery/APKBUILD diff --git a/ilot/py3-tenant-schemas-celery/APKBUILD b/ilot/py3-tenant-schemas-celery/APKBUILD new file mode 100644 index 0000000..4398eae --- /dev/null +++ b/ilot/py3-tenant-schemas-celery/APKBUILD 
@@ -0,0 +1,41 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-tenant-schemas-celery +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=tenant-schemas-celery +pkgver=2.2.0 +pkgrel=0 +pkgdesc="Celery integration for django-tenant-schemas and django-tenants" +url="https://pypi.python.org/project/tenant-schemas-celery" +arch="noarch" +license="MIT" +depends="py3-django-tenants py3-celery" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/maciej-gol/tenant-schemas-celery/tar.gz/refs/tags/$pkgver + " +options="!check" # Test suite wants docker +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +dad71011306936dc84d966797b113008780750e9e973513092bec892be0d1468e0a0e7e8e2fcca9765309a27767e1c72bdaad7c8aca16353ae1eef783c239148 py3-tenant-schemas-celery-2.2.0.tar.gz +" From 71cf1c997b33b8d888dc5b26dc3563c3cac5f19a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:47 -0400 Subject: [PATCH 25/38] ilot/uptime-kuma: new aport --- ilot/uptime-kuma/APKBUILD | 49 ++++++++++++++++++++++++ ilot/uptime-kuma/uptime-kuma.conf | 47 +++++++++++++++++++++++ ilot/uptime-kuma/uptime-kuma.openrc | 48 +++++++++++++++++++++++ ilot/uptime-kuma/uptime-kuma.pre-install | 25 ++++++++++++ 4 files changed, 169 insertions(+) create mode 100644 ilot/uptime-kuma/APKBUILD create mode 100644 ilot/uptime-kuma/uptime-kuma.conf create mode 100644 ilot/uptime-kuma/uptime-kuma.openrc create mode 100755 ilot/uptime-kuma/uptime-kuma.pre-install diff --git a/ilot/uptime-kuma/APKBUILD b/ilot/uptime-kuma/APKBUILD new file mode 100644 index 0000000..cea07d0 --- /dev/null +++ b/ilot/uptime-kuma/APKBUILD @@ -0,0 +1,49 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=uptime-kuma +pkgver=1.23.13 +pkgrel=0 +pkgdesc='A fancy self-hosted monitoring tool' +arch="all" +url="https://github.com/louislam/uptime-kuma" +license="MIT" +depends="nodejs" +makedepends="npm" +source=" + uptime-kuma-$pkgver.tar.gz::https://github.com/louislam/uptime-kuma/archive/refs/tags/$pkgver.tar.gz + uptime-kuma.openrc + uptime-kuma.conf + " +subpackages="$pkgname-doc $pkgname-openrc" +install="$pkgname.pre-install" + +build() { + npm ci + npm run build + rm -Rf "$builddir"/node_modules + npm ci --omit=dev +} + +package() { + install -dm 755 \ + "$pkgdir"/usr/share/webapps \ + "$pkgdir"/usr/share/doc \ + "$pkgdir"/usr/share/licenses/uptime-kuma \ + "$pkgdir"/etc/init.d \ + "$pkgdir"/etc/conf.d + + # install + cp -a "$builddir" "$pkgdir/usr/share/webapps/uptime-kuma" + + # openrc + install -Dm755 "$srcdir"/uptime-kuma.openrc "$pkgdir"/etc/init.d/uptime-kuma + install -Dm755 "$srcdir"/uptime-kuma.conf "$pkgdir"/etc/conf.d/uptime-kuma + + # docs and licenses + mv "$pkgdir"/usr/share/webapps/uptime-kuma/LICENSE "$pkgdir"/usr/share/licenses/uptime-kuma/. 
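+	# the LICENSE file placed under usr/share/licenses is swept into the
+	# -doc subpackage by abuild's default doc() split declared above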
+}
+sha512sums="
+9045cdc69d46ce34011f7866844a8d1866eee21850be6eede3226e77b9c0d3ecc0190481671f04f25da40345b29cc2d13de07bcc27e7baeff7901b4bd9c8b93f  uptime-kuma-1.23.13.tar.gz
+0ceddb98a6f318029b8bd8b5a49b55c883e77a5f8fffe2b9b271c9abf0ac52dc7a6ea4dbb4a881124a7857f1e43040f18755c1c2a034479e6a94d2b65a73d847  uptime-kuma.openrc
+1dbae536b23e3624e139155abbff383bba3209ff2219983da2616b4376b1a5041df812d1e5164716fc6e967a8446d94baae3b96ee575d400813cc6fdc2cc274e  uptime-kuma.conf
+"
diff --git a/ilot/uptime-kuma/uptime-kuma.conf b/ilot/uptime-kuma/uptime-kuma.conf
new file mode 100644
index 0000000..f816a99
--- /dev/null
+++ b/ilot/uptime-kuma/uptime-kuma.conf
@@ -0,0 +1,47 @@
+# uptime-kuma config
+# for more info
+# see https://github.com/louislam/uptime-kuma/wiki/Environment-Variables
+
+# Set the directory where the data should be stored (could be relative)
+# DATA_DIR=/var/lib/uptime-kuma
+
+# Host to bind to, could be an ip.
+# UPTIME_KUMA_HOST=::
+
+# Port to listen to
+# UPTIME_KUMA_PORT=3001
+
+# Path to SSL key
+# UPTIME_KUMA_SSL_KEY=
+
+# Path to SSL certificate
+# UPTIME_KUMA_SSL_CERT=
+
+# SSL Key Passphrase
+# UPTIME_KUMA_SSL_KEY_PASSPHRASE=
+
+# Cloudflare Tunnel Token
+# UPTIME_KUMA_CLOUDFLARED_TOKEN=
+
+# By default, Uptime Kuma is not allowed in an iframe if the domain name is
+# not the same as the parent. This protects your Uptime Kuma from being used
+# for phishing. If you don't need this protection, you can set it to true
+# UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN=false
+
+# By default, Uptime Kuma verifies that the websocket ORIGIN header matches
+# your server's hostname. If you don't need this protection, you can set it
+# to bypass. See GHSA-mj22-23ff-2hrr for further context.
+# UPTIME_KUMA_WS_ORIGIN_CHECK=cors-like
+
+# Allow to specify any executables as Chromium
+# UPTIME_KUMA_ALLOW_ALL_CHROME_EXEC=0
+
+# Add your self-signed CA certs.
+# NODE_EXTRA_CA_CERTS=
+
+# Ignore all TLS errors
+# NODE_TLS_REJECT_UNAUTHORIZED=0
+
+# Set it to --insecure-http-parser if you encounter the error "Invalid header
+# value char" when your website is behind a WAF
+# NODE_OPTIONS=
diff --git a/ilot/uptime-kuma/uptime-kuma.openrc b/ilot/uptime-kuma/uptime-kuma.openrc
new file mode 100644
index 0000000..ce7b00e
--- /dev/null
+++ b/ilot/uptime-kuma/uptime-kuma.openrc
@@ -0,0 +1,48 @@
+#!/sbin/openrc-run
+
+description="Uptime Kuma self-hosted monitoring tool"
+
+# Change $directory to path to uptime-kuma
+directory=${directory:-/usr/share/webapps/uptime-kuma}
+pidfile=${pidfile:-/run/$RC_SVCNAME.pid}
+DATA_DIR=${DATA_DIR:-/var/lib/uptime-kuma}
+
+log_dir="/var/log/$RC_SVCNAME"
+logfile=${logfile:-$log_dir/$RC_SVCNAME.log}
+output_log="${output_log:-$logfile}"
+error_log="${error_log:-$logfile}"
+
+command=${command:-/usr/bin/node}
+command_args="$directory/server/server.js"
+command_user=${command_user:-uptime-kuma:uptime-kuma}
+command_background=true
+
+depend() {
+	need net
+}
+
+start_pre() {
+	checkpath --owner=$command_user --directory $log_dir \
+		$DATA_DIR \
+		$DATA_DIR/upload
+	checkpath --owner=$command_user --file $logfile \
+		$DATA_DIR/error.log
+
+	[ !
-e $DATA_DIR/kuma.db ] && + cp $directory/db/kuma.db $DATA_DIR + + checkpath --owner=$command_user --mode 600 --file $DATA_DIR/kuma.db* + + cd $directory + + export DATA_DIR UPTIME_KUMA_HOST UPTIME_KUMA_PORT UPTIME_KUMA_SSL_KEY \ + UPTIME_KUMA_SSL_CERT UPTIME_KUMA_SSL_KEY_PASSPHRASE \ + UPTIME_KUMA_CLOUDFLARED_TOKEN UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN \ + UPTIME_KUMA_WS_ORIGIN_CHECK UPTIME_KUMA_ALLOW_ALL_CHROME_EXEC \ + NODE_EXTRA_CA_CERTS NODE_TLS_REJECT_UNAUTHORIZED NODE_OPTIONS +} + +start_post() { + # Wait for the server to be started + sleep 10 +} diff --git a/ilot/uptime-kuma/uptime-kuma.pre-install b/ilot/uptime-kuma/uptime-kuma.pre-install new file mode 100755 index 0000000..0217278 --- /dev/null +++ b/ilot/uptime-kuma/uptime-kuma.pre-install @@ -0,0 +1,25 @@ +#!/bin/sh + +DATADIR='/var/lib/uptime-kuma' + +if ! getent group uptime-kuma 1>/dev/null; then + echo '* Creating group uptime-kuma' 1>&2 + + addgroup -S uptime-kuma +fi + +if ! id uptime-kuma 2>/dev/null 1>&2; then + echo '* Creating user uptime-kuma' 1>&2 + + adduser -DHS -G uptime-kuma -h "$DATADIR" -s /bin/sh \ + -g "added by apk for uptime-kuma" uptime-kuma + passwd -u uptime-kuma 1>/dev/null # unlock +fi + +if ! id -Gn uptime-kuma | grep -Fq www-data; then + echo '* Adding user uptime-kuma to group www-data' 1>&2 + + addgroup uptime-kuma www-data +fi + +exit 0 From f37777699f3821d93a14b84b15b140047b9fe18f Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 9 Aug 2024 22:28:49 -0400 Subject: [PATCH 26/38] ilot/wikijs: new aport --- ilot/wikijs/APKBUILD | 60 +++++++++++++++++++++++++++++ ilot/wikijs/config.sample.yml.patch | 13 +++++++ ilot/wikijs/wikijs.initd | 24 ++++++++++++ ilot/wikijs/wikijs.post-install | 18 +++++++++ ilot/wikijs/wikijs.pre-install | 20 ++++++++++ 5 files changed, 135 insertions(+) create mode 100644 ilot/wikijs/APKBUILD create mode 100644 ilot/wikijs/config.sample.yml.patch create mode 100644 ilot/wikijs/wikijs.initd create mode 100755 ilot/wikijs/wikijs.post-install create mode 100644 ilot/wikijs/wikijs.pre-install diff --git a/ilot/wikijs/APKBUILD b/ilot/wikijs/APKBUILD new file mode 100644 index 0000000..aeaad93 --- /dev/null +++ b/ilot/wikijs/APKBUILD @@ -0,0 +1,60 @@ +# Maintainer: Antoine Martin (ayakael) +# Contributor: Antoine Martin (ayakael) + +pkgname=wikijs +pkgver=2.5.303 +pkgrel=0 +pkgdesc="Wiki.js | A modern, lightweight and powerful wiki app built on Node.js" +license="AGPL-3.0" +arch="!armv7 x86_64" +options="!check" # No test suite +depends=" + libcap-setcap + nodejs>=10.12.0 + postgresql + python3 + " +makedepends=" + yarn + npm + " +url="https://github.com/Requarks/wiki" +subpackages="$pkgname-openrc" +install="$pkgname.post-install $pkgname.pre-install" +builddir="$srcdir"/wiki-$pkgver +pkgusers="wikijs" +pkggroups="wikijs" +source=" + $pkgname-$pkgver.tar.gz::https://github.com/requarks/wiki/archive/refs/tags/v$pkgver.tar.gz + wikijs.initd + config.sample.yml.patch +" + +prepare() { + default_prepare + sed -i "s|\"version.*|\"version\": \"$pkgver\",|" "$builddir"/package.json + sed -i 's|"dev": true.*|"dev": "false",|' "$builddir"/package.json +} +build() { + yarn --frozen-lockfile --non-interactive + yarn build + rm -rf node_modules + yarn --production --frozen-lockfile --non-interactive +} + +package() { + install -Dm755 "$srcdir"/wikijs.initd "$pkgdir"/etc/init.d/wikijs + + install -Dm644 -o 5494 -g 5494 "$builddir"/config.sample.yml "$pkgdir"/etc/wikijs/config.yml + + install -Dm644 "$builddir"/package.json -t "$pkgdir"/usr/lib/bundles/wikijs + cp -aR 
"$builddir"/assets "$builddir"/server "$builddir"/node_modules "$pkgdir"/usr/lib/bundles/wikijs + + mkdir -p "$pkgdir"/var/lib/wikijs + chown 5494:5494 "$pkgdir"/var/lib/wikijs +} +sha512sums=" +a463d79ad0d8ff15dbe568b839094d697c6de0b2e991b77a4944e2a82f9789de6840e504a4673e4e0900d61596e880ca276008de86dac4f05f5823dc0427d2fc wikijs-2.5.303.tar.gz +355131ee5617348b82681cb8543c784eea59689990a268ecd3b77d44fe9abcca9c86fb8b047f0a8faeba079c650faa7790c5dd65418d313cd7561f38bb590c03 wikijs.initd +07b536c20e370d2a926038165f0e953283259c213a80a8648419565f5359ab05f528ac310e81606914013da212270df6feddb22e514cbcb2464c8274c956e4af config.sample.yml.patch +" diff --git a/ilot/wikijs/config.sample.yml.patch b/ilot/wikijs/config.sample.yml.patch new file mode 100644 index 0000000..6532c25 --- /dev/null +++ b/ilot/wikijs/config.sample.yml.patch @@ -0,0 +1,13 @@ +diff --git a/config.sample.yml.orig b/config.sample.yml +index 47edd8d..458472a 100644 +--- a/config.sample.yml.orig ++++ b/config.sample.yml +@@ -136,7 +136,7 @@ ha: false + # Data Path + # --------------------------------------------------------------------- + # Writeable data path used for cache and temporary user uploads. +-dataPath: ./data ++dataPath: /var/lib/wikijs/data + + # --------------------------------------------------------------------- + # Body Parser Limit diff --git a/ilot/wikijs/wikijs.initd b/ilot/wikijs/wikijs.initd new file mode 100644 index 0000000..680efbf --- /dev/null +++ b/ilot/wikijs/wikijs.initd @@ -0,0 +1,24 @@ +#!/sbin/openrc-run +name="$RC_SVCNAME" +cfgfile="/etc/conf.d/$RC_SVCNAME" +pidfile="/var/run/$RC_SVCNAME.pid" +command="/usr/bin/node server" +command_args="" +command_user="wikijs" +command_group="wikijs" +supervisor="supervise-daemon" +start_stop_daemon_args="" +command_background="yes" +output_log="/var/log/$RC_SVCNAME/$RC_SVCNAME.log" +error_log="/var/log/$RC_SVCNAME/$RC_SVCNAME.err" +working_directory="/usr/lib/bundles/wikijs" + +start_pre() { + checkpath --directory --owner $command_user:$command_user --mode 0775 \ + /var/log/$RC_SVCNAME \ + /var/lib/$RC_SVCNAME + export NODE_ENV=production + export CONFIG_FILE=/etc/wikijs/config.yml + cd "$working_directory" +} + diff --git a/ilot/wikijs/wikijs.post-install b/ilot/wikijs/wikijs.post-install new file mode 100755 index 0000000..748e847 --- /dev/null +++ b/ilot/wikijs/wikijs.post-install @@ -0,0 +1,18 @@ +#!/bin/sh +set -eu + +group=wikijs +config_file='/etc/wikijs/config.yml' + +setcap 'cap_net_bind_service=+ep' /usr/bin/node + +cat >&2 <<-EOF +* +* 1. Adjust settings in /etc/wikijs/config.yml. +* +* 2. Create database for wikijs: +* +* psql -c "CREATE ROLE wikijs PASSWORD 'top-secret' INHERIT LOGIN;" +* psql -c "CREATE DATABASE wkijs OWNER wikijs ENCODING 'UTF-8';" +* +EOF diff --git a/ilot/wikijs/wikijs.pre-install b/ilot/wikijs/wikijs.pre-install new file mode 100644 index 0000000..579485d --- /dev/null +++ b/ilot/wikijs/wikijs.pre-install @@ -0,0 +1,20 @@ +#!/bin/sh +# It's very important to set user/group correctly. + +wikijs_dir='/var/lib/wikijs' + +if ! getent group wikijs 1>/dev/null; then + echo '* Creating group wikijs' 1>&2 + + addgroup -S wikijs -g 5494 +fi + +if ! 
id wikijs 2>/dev/null 1>&2; then + echo '* Creating user wikijs' 1>&2 + + adduser -DHS -G wikijs -h "$wikijs_dir" -u 5494 -s /bin/sh \ + -g "added by apk for wikijs" wikijs + passwd -u wikijs 1>/dev/null # unlock +fi + +exit 0 From 7c15cbb47e80af202087e02ac4cf2eca02561e73 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sat, 24 Aug 2024 18:41:43 -0400 Subject: [PATCH 27/38] gitlab-ci: drop --- .gitlab-ci.yml | 109 ------ .gitlab/bin/APKBUILD_SHIM | 111 ------ .gitlab/bin/apkbuild-shellcheck | 16 - .gitlab/bin/build-rootfs.sh | 556 ----------------------------- .gitlab/bin/build.sh | 283 --------------- .gitlab/bin/changed-aports | 20 -- .gitlab/bin/functions.sh | 74 ---- .gitlab/bin/lint | 96 ----- .gitlab/bin/push.sh | 56 --- .gitlab/patches/abuild-cross.patch | 17 - 10 files changed, 1338 deletions(-) delete mode 100644 .gitlab-ci.yml delete mode 100755 .gitlab/bin/APKBUILD_SHIM delete mode 100755 .gitlab/bin/apkbuild-shellcheck delete mode 100755 .gitlab/bin/build-rootfs.sh delete mode 100755 .gitlab/bin/build.sh delete mode 100755 .gitlab/bin/changed-aports delete mode 100755 .gitlab/bin/functions.sh delete mode 100755 .gitlab/bin/lint delete mode 100755 .gitlab/bin/push.sh delete mode 100644 .gitlab/patches/abuild-cross.patch diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index dd8afae..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,109 +0,0 @@ -stages: - - verify - - build - - deploy - -variables: - GIT_STRATEGY: clone - GIT_DEPTH: "500" - -lint: - stage: verify - interruptible: true - script: - - | - sudo apk add shellcheck atools sudo abuild - export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" - lint - allow_failure: true - only: - - merge_requests - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64 - -.build: - stage: build - interruptible: true - script: - - | - sudo apk add alpine-sdk lua-aports sudo - sudo addgroup $USER abuild - export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" - sudo -Eu $USER build.sh - artifacts: - paths: - - packages/ - - keys/ - - logs/ - expire_in: 7 days - when: always - only: - - merge_requests - -.cross: - stage: build - interruptible: true - script: - - | - sudo apk add alpine-sdk lua-aports sudo gzip xz qemu-$CI_QEMU_TARGET_ARCH - sudo addgroup $USER abuild - export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" - build-rootfs.sh alpine${CI_MERGE_REQUEST_TARGET_BRANCH_NAME/v} $CI_ALPINE_TARGET_ARCH --rootfsdir $HOME/sysroot-$CI_ALPINE_TARGET_ARCH - cp /etc/apk/repositories $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/. 
- sudo -Eu $USER CHOST=$CI_TARGET_ALPINE_ARCH build.sh - artifacts: - paths: - - packages/ - - keys/ - - logs/ - expire_in: 7 days - when: always - only: - - merge_requests - -build-x86_64: - extends: .build - when: always - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64 - -build-aarch64: - extends: .build - when: always - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-aarch64 - -build-ppc64le: - extends: .build - when: manual - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-ppc64le - -build-s390x: - extends: .build - when: manual - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-s390x - -build-armv7: - extends: .cross - when: manual - tags: - - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64 - variables: - CI_ALPINE_TARGET_ARCH: armv7 - CI_QEMU_TARGET_ARCH: arm - -push: - interruptible: true - stage: deploy - script: - - | - sudo apk add abuild git-lfs findutils - export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" - push.sh - rules: - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - when: manual - tags: - - repo diff --git a/.gitlab/bin/APKBUILD_SHIM b/.gitlab/bin/APKBUILD_SHIM deleted file mode 100755 index 76577ff..0000000 --- a/.gitlab/bin/APKBUILD_SHIM +++ /dev/null @@ -1,111 +0,0 @@ -#!/bin/sh - -set -e - -arch= -builddir= -checkdepends= -depends= -depends_dev= -depends_doc= -depends_libs= -depends_openrc= -depends_static= -install= -install_if= -langdir= -ldpath= -license= -makedepends= -makedepends_build= -makedepends_host= -md5sums= -options= -patch_args= -pkgbasedir= -pkgdesc= -pkgdir= -pkgname= -pkgrel= -pkgver= -pkggroups= -pkgusers= -provides= -provider_priority= -replaces= -sha256sums= -sha512sums= -sonameprefix= -source= -srcdir= -startdir= -subpackages= -subpkgdir= -subpkgname= -triggers= -url= - -# abuild.conf - -CFLAGS= -CXXFLAGS= -CPPFLAGS= -LDFLAGS= -JOBS= -MAKEFLAGS= -CMAKE_CROSSOPTS= - -. 
./APKBUILD - -: "$arch" -: "$builddir" -: "$checkdepends" -: "$depends" -: "$depends_dev" -: "$depends_doc" -: "$depends_libs" -: "$depends_openrc" -: "$depends_static" -: "$install" -: "$install_if" -: "$langdir" -: "$ldpath" -: "$license" -: "$makedepends" -: "$makedepends_build" -: "$makedepends_host" -: "$md5sums" -: "$options" -: "$patch_args" -: "$pkgbasedir" -: "$pkgdesc" -: "$pkgdir" -: "$pkgname" -: "$pkgrel" -: "$pkgver" -: "$pkggroups" -: "$pkgusers" -: "$provides" -: "$provider_priority" -: "$replaces" -: "$sha256sums" -: "$sha512sums" -: "$sonameprefix" -: "$source" -: "$srcdir" -: "$startdir" -: "$subpackages" -: "$subpkgdir" -: "$subpkgname" -: "$triggers" -: "$url" - -# abuild.conf - -: "$CFLAGS" -: "$CXXFLAGS" -: "$CPPFLAGS" -: "$LDFLAGS" -: "$JOBS" -: "$MAKEFLAGS" -: "$CMAKE_CROSSOPTS" diff --git a/.gitlab/bin/apkbuild-shellcheck b/.gitlab/bin/apkbuild-shellcheck deleted file mode 100755 index 3126684..0000000 --- a/.gitlab/bin/apkbuild-shellcheck +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -shellcheck -s ash \ - -e SC3043 \ - -e SC3057 \ - -e SC3060 \ - -e SC2016 \ - -e SC2086 \ - -e SC2169 \ - -e SC2155 \ - -e SC2100 \ - -e SC2209 \ - -e SC2030 \ - -e SC2031 \ - -e SC1090 \ - -xa $CI_PROJECT_DIR/.gitlab/bin/APKBUILD_SHIM diff --git a/.gitlab/bin/build-rootfs.sh b/.gitlab/bin/build-rootfs.sh deleted file mode 100755 index 44c4372..0000000 --- a/.gitlab/bin/build-rootfs.sh +++ /dev/null @@ -1,556 +0,0 @@ -#!/usr/bin/env bash -# Availabl here: https://lab.ilot.io/dotnet/arcade/-/blob/7f6d9796cc7f594772f798358dbdd8c69b6a97af/eng/common/cross/build-rootfs.sh -# Only modification: qemu-$arch-static becomes qemu-$arch - -set -e - -usage() -{ - echo "Usage: $0 [BuildArch] [CodeName] [lldbx.y] [llvmx[.y]] [--skipunmount] --rootfsdir ]" - echo "BuildArch can be: arm(default), arm64, armel, armv6, ppc64le, riscv64, s390x, x64, x86" - echo "CodeName - optional, Code name for Linux, can be: xenial(default), zesty, bionic, alpine" - echo " for alpine can be specified with version: alpineX.YY or alpineedge" - echo " for FreeBSD can be: freebsd12, freebsd13" - echo " for illumos can be: illumos" - echo " for Haiku can be: haiku." - echo "lldbx.y - optional, LLDB version, can be: lldb3.9(default), lldb4.0, lldb5.0, lldb6.0 no-lldb. Ignored for alpine and FreeBSD" - echo "llvmx[.y] - optional, LLVM version for LLVM related packages." - echo "--skipunmount - optional, will skip the unmount of rootfs folder." - echo "--use-mirror - optional, use mirror URL to fetch resources, when available." - echo "--jobs N - optional, restrict to N jobs." 
- exit 1 -} - -__CodeName=xenial -__CrossDir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -__BuildArch=arm -__AlpineArch=armv7 -__FreeBSDArch=arm -__FreeBSDMachineArch=armv7 -__IllumosArch=arm7 -__QEMUArch=arm -__UbuntuArch=armhf -__UbuntuRepo="http://ports.ubuntu.com/" -__LLDB_Package="liblldb-3.9-dev" -__SkipUnmount=0 - -# base development support -__UbuntuPackages="build-essential" - -__AlpinePackages="alpine-base" -__AlpinePackages+=" build-base" - -# symlinks fixer -__UbuntuPackages+=" symlinks" - -# runtime dependencies -__UbuntuPackages+=" libicu-dev" -__UbuntuPackages+=" liblttng-ust-dev" -__UbuntuPackages+=" libunwind8-dev" -__UbuntuPackages+=" libnuma-dev" - -# runtime libraries' dependencies -__UbuntuPackages+=" libcurl4-openssl-dev" -__UbuntuPackages+=" libkrb5-dev" -__UbuntuPackages+=" libssl-dev" -__UbuntuPackages+=" zlib1g-dev" - -__FreeBSDBase="12.3-RELEASE" -__FreeBSDPkg="1.17.0" -__FreeBSDABI="12" -__FreeBSDPackages="libunwind" -__FreeBSDPackages+=" icu" -__FreeBSDPackages+=" libinotify" -__FreeBSDPackages+=" openssl" -__FreeBSDPackages+=" krb5" -__FreeBSDPackages+=" terminfo-db" - -__IllumosPackages="icu" -__IllumosPackages+=" mit-krb5" -__IllumosPackages+=" openssl" -__IllumosPackages+=" zlib" - -__HaikuPackages="gmp" -__HaikuPackages+=" gmp_devel" -__HaikuPackages+=" krb5" -__HaikuPackages+=" krb5_devel" -__HaikuPackages+=" libiconv" -__HaikuPackages+=" libiconv_devel" -__HaikuPackages+=" llvm12_libunwind" -__HaikuPackages+=" llvm12_libunwind_devel" -__HaikuPackages+=" mpfr" -__HaikuPackages+=" mpfr_devel" - -# ML.NET dependencies -__UbuntuPackages+=" libomp5" -__UbuntuPackages+=" libomp-dev" - -__Keyring= -__UseMirror=0 - -__UnprocessedBuildArgs= -while :; do - if [[ "$#" -le 0 ]]; then - break - fi - - lowerI="$(echo "$1" | tr "[:upper:]" "[:lower:]")" - case $lowerI in - -\?|-h|--help) - usage - exit 1 - ;; - arm) - __BuildArch=arm - __UbuntuArch=armhf - __AlpineArch=armv7 - __QEMUArch=arm - ;; - arm64) - __BuildArch=arm64 - __UbuntuArch=arm64 - __AlpineArch=aarch64 - __QEMUArch=aarch64 - __FreeBSDArch=arm64 - __FreeBSDMachineArch=aarch64 - ;; - armel) - __BuildArch=armel - __UbuntuArch=armel - __UbuntuRepo="http://ftp.debian.org/debian/" - __CodeName=jessie - ;; - armv6) - __BuildArch=armv6 - __UbuntuArch=armhf - __QEMUArch=arm - __UbuntuRepo="http://raspbian.raspberrypi.org/raspbian/" - __CodeName=buster - __LLDB_Package="liblldb-6.0-dev" - - if [[ -e "/usr/share/keyrings/raspbian-archive-keyring.gpg" ]]; then - __Keyring="--keyring /usr/share/keyrings/raspbian-archive-keyring.gpg" - fi - ;; - riscv64) - __BuildArch=riscv64 - __AlpineArch=riscv64 - __QEMUArch=riscv64 - __UbuntuArch=riscv64 - __UbuntuRepo="http://deb.debian.org/debian-ports" - __UbuntuPackages="${__UbuntuPackages// libunwind8-dev/}" - unset __LLDB_Package - - if [[ -e "/usr/share/keyrings/debian-ports-archive-keyring.gpg" ]]; then - __Keyring="--keyring /usr/share/keyrings/debian-ports-archive-keyring.gpg --include=debian-ports-archive-keyring" - fi - ;; - ppc64le) - __BuildArch=ppc64le - __AlpineArch=ppc64le - __QEMUArch=ppc64le - __UbuntuArch=ppc64el - __UbuntuRepo="http://ports.ubuntu.com/ubuntu-ports/" - __UbuntuPackages="${__UbuntuPackages// libunwind8-dev/}" - __UbuntuPackages="${__UbuntuPackages// libomp-dev/}" - __UbuntuPackages="${__UbuntuPackages// libomp5/}" - unset __LLDB_Package - ;; - s390x) - __BuildArch=s390x - __AlpineArch=s390x - __QEMUArch=s390x - __UbuntuArch=s390x - __UbuntuRepo="http://ports.ubuntu.com/ubuntu-ports/" - __UbuntuPackages="${__UbuntuPackages// 
libunwind8-dev/}" - __UbuntuPackages="${__UbuntuPackages// libomp-dev/}" - __UbuntuPackages="${__UbuntuPackages// libomp5/}" - unset __LLDB_Package - ;; - x64) - __BuildArch=x64 - __AlpineArch=x86_64 - __QEMUArch=x86_64 - __UbuntuArch=amd64 - __FreeBSDArch=amd64 - __FreeBSDMachineArch=amd64 - __illumosArch=x86_64 - __UbuntuRepo= - ;; - x86) - __BuildArch=x86 - __AlpineArch=i386 - __QEMUArch=i386 - __UbuntuArch=i386 - __AlpineArch=x86 - __UbuntuRepo="http://archive.ubuntu.com/ubuntu/" - ;; - lldb*) - version="${lowerI/lldb/}" - parts=(${version//./ }) - - # for versions > 6.0, lldb has dropped the minor version - if [[ "${parts[0]}" -gt 6 ]]; then - version="${parts[0]}" - fi - - __LLDB_Package="liblldb-${version}-dev" - ;; - no-lldb) - unset __LLDB_Package - ;; - llvm*) - version="${lowerI/llvm/}" - parts=(${version//./ }) - __LLVM_MajorVersion="${parts[0]}" - __LLVM_MinorVersion="${parts[1]}" - - # for versions > 6.0, llvm has dropped the minor version - if [[ -z "$__LLVM_MinorVersion" && "$__LLVM_MajorVersion" -le 6 ]]; then - __LLVM_MinorVersion=0; - fi - ;; - xenial) # Ubuntu 16.04 - if [[ "$__CodeName" != "jessie" ]]; then - __CodeName=xenial - fi - ;; - zesty) # Ubuntu 17.04 - if [[ "$__CodeName" != "jessie" ]]; then - __CodeName=zesty - fi - ;; - bionic) # Ubuntu 18.04 - if [[ "$__CodeName" != "jessie" ]]; then - __CodeName=bionic - fi - ;; - focal) # Ubuntu 20.04 - if [[ "$__CodeName" != "jessie" ]]; then - __CodeName=focal - fi - ;; - jammy) # Ubuntu 22.04 - if [[ "$__CodeName" != "jessie" ]]; then - __CodeName=jammy - fi - ;; - jessie) # Debian 8 - __CodeName=jessie - - if [[ -z "$__UbuntuRepo" ]]; then - __UbuntuRepo="http://ftp.debian.org/debian/" - fi - ;; - stretch) # Debian 9 - __CodeName=stretch - __LLDB_Package="liblldb-6.0-dev" - - if [[ -z "$__UbuntuRepo" ]]; then - __UbuntuRepo="http://ftp.debian.org/debian/" - fi - ;; - buster) # Debian 10 - __CodeName=buster - __LLDB_Package="liblldb-6.0-dev" - - if [[ -z "$__UbuntuRepo" ]]; then - __UbuntuRepo="http://ftp.debian.org/debian/" - fi - ;; - bullseye) # Debian 11 - __CodeName=bullseye - - if [[ -z "$__UbuntuRepo" ]]; then - __UbuntuRepo="http://ftp.debian.org/debian/" - fi - ;; - sid) # Debian sid - __CodeName=sid - - if [[ -z "$__UbuntuRepo" ]]; then - __UbuntuRepo="http://ftp.debian.org/debian/" - fi - ;; - tizen) - __CodeName= - __UbuntuRepo= - __Tizen=tizen - ;; - alpine*) - __CodeName=alpine - __UbuntuRepo= - version="${lowerI/alpine/}" - - if [[ "$version" == "edge" ]]; then - __AlpineVersion=edge - else - parts=(${version//./ }) - __AlpineMajorVersion="${parts[0]}" - __AlpineMinoVersion="${parts[1]}" - __AlpineVersion="$__AlpineMajorVersion.$__AlpineMinoVersion" - fi - ;; - freebsd12) - __CodeName=freebsd - __SkipUnmount=1 - ;; - freebsd13) - __CodeName=freebsd - __FreeBSDBase="13.0-RELEASE" - __FreeBSDABI="13" - __SkipUnmount=1 - ;; - illumos) - __CodeName=illumos - __SkipUnmount=1 - ;; - haiku) - __CodeName=haiku - __BuildArch=x64 - __SkipUnmount=1 - ;; - --skipunmount) - __SkipUnmount=1 - ;; - --rootfsdir|-rootfsdir) - shift - __RootfsDir="$1" - ;; - --use-mirror) - __UseMirror=1 - ;; - --use-jobs) - shift - MAXJOBS=$1 - ;; - *) - __UnprocessedBuildArgs="$__UnprocessedBuildArgs $1" - ;; - esac - - shift -done - -if [[ "$__BuildArch" == "armel" ]]; then - __LLDB_Package="lldb-3.5-dev" -fi - -__UbuntuPackages+=" ${__LLDB_Package:-}" - -if [[ -n "$__LLVM_MajorVersion" ]]; then - __UbuntuPackages+=" libclang-common-${__LLVM_MajorVersion}${__LLVM_MinorVersion:+.$__LLVM_MinorVersion}-dev" -fi - -if [[ -z 
"$__RootfsDir" && -n "$ROOTFS_DIR" ]]; then - __RootfsDir="$ROOTFS_DIR" -fi - -if [[ -z "$__RootfsDir" ]]; then - __RootfsDir="$__CrossDir/../../../.tools/rootfs/$__BuildArch" -fi - -if [[ -d "$__RootfsDir" ]]; then - if [[ "$__SkipUnmount" == "0" ]]; then - umount "$__RootfsDir"/* || true - fi - rm -rf "$__RootfsDir" -fi - -mkdir -p "$__RootfsDir" -__RootfsDir="$( cd "$__RootfsDir" && pwd )" - -if [[ "$__CodeName" == "alpine" ]]; then - __ApkToolsVersion=2.12.11 - __ApkToolsDir="$(mktemp -d)" - - wget "https://gitlab.alpinelinux.org/api/v4/projects/5/packages/generic//v$__ApkToolsVersion/x86_64/apk.static" -P "$__ApkToolsDir" - chmod +x "$__ApkToolsDir/apk.static" - - mkdir -p "$__RootfsDir"/usr/bin - cp -v "/usr/bin/qemu-$__QEMUArch" "$__RootfsDir/usr/bin" - - if [[ "$__AlpineVersion" == "edge" ]]; then - version=edge - else - version="v$__AlpineVersion" - fi - - # initialize DB - "$__ApkToolsDir/apk.static" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ - -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" --initdb add - - if [[ "$__AlpineLlvmLibsLookup" == 1 ]]; then - __AlpinePackages+=" $("$__ApkToolsDir/apk.static" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ - -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" \ - search 'llvm*-libs' | sort | tail -1 | sed 's/-[^-]*//2g')" - fi - - # install all packages in one go - "$__ApkToolsDir/apk.static" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ - -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ - -U --allow-untrusted --no-scripts --root "$__RootfsDir" --arch "$__AlpineArch" \ - add $__AlpinePackages - - rm -r "$__ApkToolsDir" -elif [[ "$__CodeName" == "freebsd" ]]; then - mkdir -p "$__RootfsDir"/usr/local/etc - JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} - wget -O - "https://download.freebsd.org/ftp/releases/${__FreeBSDArch}/${__FreeBSDMachineArch}/${__FreeBSDBase}/base.txz" | tar -C "$__RootfsDir" -Jxf - ./lib ./usr/lib ./usr/libdata ./usr/include ./usr/share/keys ./etc ./bin/freebsd-version - echo "ABI = \"FreeBSD:${__FreeBSDABI}:${__FreeBSDMachineArch}\"; FINGERPRINTS = \"${__RootfsDir}/usr/share/keys\"; REPOS_DIR = [\"${__RootfsDir}/etc/pkg\"]; REPO_AUTOUPDATE = NO; RUN_SCRIPTS = NO;" > "${__RootfsDir}"/usr/local/etc/pkg.conf - echo "FreeBSD: { url: \"pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly\", mirror_type: \"srv\", signature_type: \"fingerprints\", fingerprints: \"${__RootfsDir}/usr/share/keys/pkg\", enabled: yes }" > "${__RootfsDir}"/etc/pkg/FreeBSD.conf - mkdir -p "$__RootfsDir"/tmp - # get and build package manager - wget -O - "https://github.com/freebsd/pkg/archive/${__FreeBSDPkg}.tar.gz" | tar -C "$__RootfsDir"/tmp -zxf - - cd "$__RootfsDir/tmp/pkg-${__FreeBSDPkg}" - # needed for install to succeed - mkdir -p "$__RootfsDir"/host/etc - ./autogen.sh && ./configure --prefix="$__RootfsDir"/host && make -j "$JOBS" && make install - rm -rf "$__RootfsDir/tmp/pkg-${__FreeBSDPkg}" - # install packages we need. 
- INSTALL_AS_USER=$(whoami) "$__RootfsDir"/host/sbin/pkg -r "$__RootfsDir" -C "$__RootfsDir"/usr/local/etc/pkg.conf update - INSTALL_AS_USER=$(whoami) "$__RootfsDir"/host/sbin/pkg -r "$__RootfsDir" -C "$__RootfsDir"/usr/local/etc/pkg.conf install --yes $__FreeBSDPackages -elif [[ "$__CodeName" == "illumos" ]]; then - mkdir "$__RootfsDir/tmp" - pushd "$__RootfsDir/tmp" - JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} - echo "Downloading sysroot." - wget -O - https://github.com/illumos/sysroot/releases/download/20181213-de6af22ae73b-v1/illumos-sysroot-i386-20181213-de6af22ae73b-v1.tar.gz | tar -C "$__RootfsDir" -xzf - - echo "Building binutils. Please wait.." - wget -O - https://ftp.gnu.org/gnu/binutils/binutils-2.33.1.tar.bz2 | tar -xjf - - mkdir build-binutils && cd build-binutils - ../binutils-2.33.1/configure --prefix="$__RootfsDir" --target="${__illumosArch}-sun-solaris2.10" --program-prefix="${__illumosArch}-illumos-" --with-sysroot="$__RootfsDir" - make -j "$JOBS" && make install && cd .. - echo "Building gcc. Please wait.." - wget -O - https://ftp.gnu.org/gnu/gcc/gcc-8.4.0/gcc-8.4.0.tar.xz | tar -xJf - - CFLAGS="-fPIC" - CXXFLAGS="-fPIC" - CXXFLAGS_FOR_TARGET="-fPIC" - CFLAGS_FOR_TARGET="-fPIC" - export CFLAGS CXXFLAGS CXXFLAGS_FOR_TARGET CFLAGS_FOR_TARGET - mkdir build-gcc && cd build-gcc - ../gcc-8.4.0/configure --prefix="$__RootfsDir" --target="${__illumosArch}-sun-solaris2.10" --program-prefix="${__illumosArch}-illumos-" --with-sysroot="$__RootfsDir" --with-gnu-as \ - --with-gnu-ld --disable-nls --disable-libgomp --disable-libquadmath --disable-libssp --disable-libvtv --disable-libcilkrts --disable-libada --disable-libsanitizer \ - --disable-libquadmath-support --disable-shared --enable-tls - make -j "$JOBS" && make install && cd .. - BaseUrl=https://pkgsrc.smartos.org - if [[ "$__UseMirror" == 1 ]]; then - BaseUrl=https://pkgsrc.smartos.skylime.net - fi - BaseUrl="$BaseUrl/packages/SmartOS/trunk/${__illumosArch}/All" - echo "Downloading manifest" - wget "$BaseUrl" - echo "Downloading dependencies." - read -ra array <<<"$__IllumosPackages" - for package in "${array[@]}"; do - echo "Installing '$package'" - # find last occurrence of package in listing and extract its name - package="$(sed -En '/.*href="('"$package"'-[0-9].*).tgz".*/h;$!d;g;s//\1/p' All)" - echo "Resolved name '$package'" - wget "$BaseUrl"/"$package".tgz - ar -x "$package".tgz - tar --skip-old-files -xzf "$package".tmp.tg* -C "$__RootfsDir" 2>/dev/null - done - echo "Cleaning up temporary files." 
- popd - rm -rf "$__RootfsDir"/{tmp,+*} - mkdir -p "$__RootfsDir"/usr/include/net - mkdir -p "$__RootfsDir"/usr/include/netpacket - wget -P "$__RootfsDir"/usr/include/net https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/io/bpf/net/bpf.h - wget -P "$__RootfsDir"/usr/include/net https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/io/bpf/net/dlt.h - wget -P "$__RootfsDir"/usr/include/netpacket https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/inet/sockmods/netpacket/packet.h - wget -P "$__RootfsDir"/usr/include/sys https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/sys/sdt.h -elif [[ "$__CodeName" == "haiku" ]]; then - JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} - - echo "Building Haiku sysroot for x86_64" - mkdir -p "$__RootfsDir/tmp" - cd "$__RootfsDir/tmp" - git clone -b hrev56235 https://review.haiku-os.org/haiku - git clone -b btrev43195 https://review.haiku-os.org/buildtools - cd "$__RootfsDir/tmp/buildtools" && git checkout 7487388f5110021d400b9f3b88e1a7f310dc066d - - # Fetch some unmerged patches - cd "$__RootfsDir/tmp/haiku" - ## Add development build profile (slimmer than nightly) - git fetch origin refs/changes/64/4164/1 && git -c commit.gpgsign=false cherry-pick FETCH_HEAD - - # Build jam - cd "$__RootfsDir/tmp/buildtools/jam" - make - - # Configure cross tools - echo "Building cross-compiler" - mkdir -p "$__RootfsDir/generated" - cd "$__RootfsDir/generated" - "$__RootfsDir/tmp/haiku/configure" -j"$JOBS" --sysroot "$__RootfsDir" --cross-tools-source "$__RootfsDir/tmp/buildtools" --build-cross-tools x86_64 - - # Build Haiku packages - echo "Building Haiku" - echo 'HAIKU_BUILD_PROFILE = "development-raw" ;' > UserProfileConfig - "$__RootfsDir/tmp/buildtools/jam/jam0" -j"$JOBS" -q 'package' 'Haiku' - - BaseUrl="https://depot.haiku-os.org/__api/v2/pkg/get-pkg" - - # Download additional packages - echo "Downloading additional required packages" - read -ra array <<<"$__HaikuPackages" - for package in "${array[@]}"; do - echo "Downloading $package..." 
- # API documented here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L60 - # The schema here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L598 - hpkgDownloadUrl="$(wget -qO- --post-data='{"name":"'"$package"'","repositorySourceCode":"haikuports_x86_64","versionType":"LATEST","naturalLanguageCode":"en"}' \ - --header='Content-Type:application/json' "$BaseUrl" | jq -r '.result.versions[].hpkgDownloadURL')" - wget -P "$__RootfsDir/generated/download" "$hpkgDownloadUrl" - done - - # Setup the sysroot - echo "Setting up sysroot and extracting needed packages" - mkdir -p "$__RootfsDir/boot/system" - for file in "$__RootfsDir/generated/objects/haiku/x86_64/packaging/packages/"*.hpkg; do - "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file" - done - for file in "$__RootfsDir/generated/download/"*.hpkg; do - "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file" - done - - # Cleaning up temporary files - echo "Cleaning up temporary files" - rm -rf "$__RootfsDir/tmp" - for name in "$__RootfsDir/generated/"*; do - if [[ "$name" =~ "cross-tools-" ]]; then - : # Keep the cross-compiler - else - rm -rf "$name" - fi - done -elif [[ -n "$__CodeName" ]]; then - qemu-debootstrap $__Keyring --arch "$__UbuntuArch" "$__CodeName" "$__RootfsDir" "$__UbuntuRepo" - cp "$__CrossDir/$__BuildArch/sources.list.$__CodeName" "$__RootfsDir/etc/apt/sources.list" - chroot "$__RootfsDir" apt-get update - chroot "$__RootfsDir" apt-get -f -y install - chroot "$__RootfsDir" apt-get -y install $__UbuntuPackages - chroot "$__RootfsDir" symlinks -cr /usr - chroot "$__RootfsDir" apt-get clean - - if [[ "$__SkipUnmount" == "0" ]]; then - umount "$__RootfsDir"/* || true - fi - - if [[ "$__BuildArch" == "armel" && "$__CodeName" == "jessie" ]]; then - pushd "$__RootfsDir" - patch -p1 < "$__CrossDir/$__BuildArch/armel.jessie.patch" - popd - fi -elif [[ "$__Tizen" == "tizen" ]]; then - ROOTFS_DIR="$__RootfsDir" "$__CrossDir/tizen-build-rootfs.sh" "$__BuildArch" -else - echo "Unsupported target platform." - usage; - exit 1 -fi diff --git a/.gitlab/bin/build.sh b/.gitlab/bin/build.sh deleted file mode 100755 index 286f965..0000000 --- a/.gitlab/bin/build.sh +++ /dev/null @@ -1,283 +0,0 @@ -#!/bin/sh -# shellcheck disable=SC3043 - -. 
$CI_PROJECT_DIR/.gitlab/bin/functions.sh - -# shellcheck disable=SC3040 -set -eu -o pipefail - -readonly APORTSDIR=$CI_PROJECT_DIR -readonly REPOS="cross backports user testing community" -readonly ALPINE_REPOS="main community" -readonly ARCH=$(apk --print-arch) -# gitlab variables -readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - -: "${REPODEST:=$HOME/packages}" -: "${MIRROR:=https://lab.ilot.io/ayakael/repo-apk/-/raw}" -: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" -: "${MAX_ARTIFACT_SIZE:=300000000}" #300M -: "${CI_DEBUG_BUILD:=}" - -: "${CI_ALPINE_BUILD_OFFSET:=0}" -: "${CI_ALPINE_BUILD_LIMIT:=9999}" -: "${CI_ALPINE_TARGET_ARCH:=$(uname -m)}" - -msg() { - local color=${2:-green} - case "$color" in - red) color="31";; - green) color="32";; - yellow) color="33";; - blue) color="34";; - *) color="32";; - esac - printf "\033[1;%sm>>>\033[1;0m %s\n" "$color" "$1" | xargs >&2 -} - -verbose() { - echo "> " "$@" - # shellcheck disable=SC2068 - $@ -} - -debugging() { - [ -n "$CI_DEBUG_BUILD" ] -} - -debug() { - if debugging; then - verbose "$@" - fi -} - -die() { - msg "$1" red - exit 1 -} - -capture_stderr() { - "$@" 2>&1 -} - -report() { - report=$1 - - reportsdir=$APORTSDIR/logs/ - mkdir -p "$reportsdir" - - tee -a "$reportsdir/$report.log" -} - -get_release() { - case $BASEBRANCH in - v*) echo "${BASEBRANCH%-*}";; - edge) echo edge;; - *) die "Branch \"$BASEBRANCH\" not supported!" - esac -} - -build_aport() { - local repo="$1" aport="$2" - cd "$APORTSDIR/$repo/$aport" - export CHOST=$CI_ALPINE_TARGET_ARCH - if abuild -r 2>&1 | report "build-$aport"; then - checkapk | report "checkapk-$aport" || true - aport_ok="$aport_ok $repo/$aport" - else - aport_ng="$aport_ng $repo/$aport" - fi -} - -check_aport() { - local repo="$1" aport="$2" - cd "$APORTSDIR/$repo/$aport" - export CHOST=$CI_ALPINE_TARGET_ARCH - # TODO: this enables crossbuild only on user, this should be cleaner - if [ "$repo" != "user" ] && [ "$repo" != "backports" ] && [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then - aport_na="$aport_na $repo/$aport" - return 1 - fi - if ! abuild check_arch 2>/dev/null; then - aport_na="$aport_na $repo/$aport" - return 1 - fi -} - -set_repositories_for() { - local target_repo="$1" repos='' repo='' - local release - - release=$(get_release) - for repo in $REPOS; do - repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" - [ "$repo" = "$target_repo" ] && break - done - sudo sh -c "printf '%s\n' $repos >> /etc/apk/repositories" - sudo apk update || true - if [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then - sudo sh -c "printf '%s\n' $repos >> $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/repositories" - sudo cp -R /etc/apk/keys/* $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/keys/. 
- sudo apk --root=$HOME/sysroot-$CI_ALPINE_TARGET_ARCH update || true - fi -} - -apply_offset_limit() { - start=$1 - limit=$2 - end=$((start+limit)) - - sed -n "$((start+1)),${end}p" -} - -setup_system() { - local repos='' repo='' - local release - - release=$(get_release) - for repo in $ALPINE_REPOS; do - [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue - repos="$repos $ALPINE_MIRROR/$release/$repo" - done - repos="$repos $MIRROR/$release/cross" - sudo sh -c "printf '%s\n' $repos > /etc/apk/repositories" - sudo apk -U upgrade -a || sudo apk fix || die "Failed to up/downgrade system" - if [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then - sudo apk add gcc-$CI_ALPINE_TARGET_ARCH - fi - gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa - gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub - chmod 700 $HOME/.abuild/$ABUILD_KEY_NAME.rsa - echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" >> $HOME/.abuild/abuild.conf - sudo cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/$ABUILD_KEY_NAME.rsa.pub - - # patch abuild for crosscompiling - sudo patch -p1 -d / -i $CI_PROJECT_DIR/.gitlab/patches/abuild-cross.patch - - sudo sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf - ( . /etc/abuild.conf && echo "Building with $JOBS jobs" ) - mkdir -p "$REPODEST" - git config --global init.defaultBranch master -} - -sysinfo() { - printf ">>> Host system information (arch: %s, release: %s) <<<\n" "$ARCH" "$(get_release)" - printf "- Number of Cores: %s\n" "$(nproc)" - printf "- Memory: %s Gb\n" "$(awk '/^MemTotal/ {print ($2/1024/1024)}' /proc/meminfo)" - printf "- Free space: %s\n" "$(df -hP / | awk '/\/$/ {print $4}')" -} - -copy_artifacts() { - cd "$APORTSDIR" - - packages_size="$(du -sk "$REPODEST" | awk '{print $1 * 1024}')" - if [ -z "$packages_size" ]; then - return - fi - - echo "Artifact size: $packages_size bytes" - - mkdir -p keys/ packages/ - - if [ "$packages_size" -lt $MAX_ARTIFACT_SIZE ]; then - msg "Copying packages for artifact upload" - cp -ar "$REPODEST"/* packages/ 2>/dev/null - cp ~/.abuild/*.rsa.pub keys/ - else - msg "Artifact size $packages_size larger than max ($MAX_ARTIFACT_SIZE), skipping uploading them" yellow - fi -} - -section_start setup "Setting up the system" collapse - -if debugging; then - set -x -fi - -aport_ok= -aport_na= -aport_ng= -failed= - -sysinfo || true -setup_system || die "Failed to setup system" - -# git no longer allows to execute in repositories owned by different users -sudo chown -R $USER: . 
- -fetch_flags="-qn" -debugging && fetch_flags="-v" - -git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \ - "+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH" - -if debugging; then - merge_base=$(git merge-base "$BASEBRANCH" HEAD) || echo "Could not determine merge-base" - echo "Merge base: $merge_base" - git --version - git config -l - [ -n "$merge_base" ] && git tag -f merge-base "$merge_base" - git --no-pager log -200 --oneline --graph --decorate --all -fi - -section_end setup - -build_start=$CI_ALPINE_BUILD_OFFSET -build_limit=$CI_ALPINE_BUILD_LIMIT - -for repo in $(changed_repos); do - mkdir -p "$APORTSDIR"/logs "$APORTSDIR"/packages "$APORTSDIR"/keys - set_repositories_for "$repo" - built_aports=0 - changed_aports_in_repo=$(changed_aports "$repo") - changed_aports_in_repo_count=$(echo "$changed_aports_in_repo" | wc -l) - changed_aports_to_build=$(echo "$changed_aports_in_repo" | apply_offset_limit "$build_start" "$build_limit") - - msg "Changed aports in $repo:" - # shellcheck disable=SC2086 # Splitting is expected here - printf " - %s\n" $changed_aports_to_build - for pkgname in $changed_aports_to_build; do - section_start "build_$pkgname" "Building package $pkgname" - built_aports=$((built_aports+1)) - if check_aport "$repo" "$pkgname"; then - build_aport "$repo" "$pkgname" - fi - section_end "build_$pkgname" - done - - build_start=$((build_start-(changed_aports_in_repo_count-built_aports))) - build_limit=$((build_limit-built_aports)) - - if [ $build_limit -le 0 ]; then - msg "Limit reached, breaking" - break - fi -done - -section_start artifacts "Handeling artifacts" collapse -copy_artifacts || true -section_end artifacts - -section_start summary "Build summary" - -echo "### Build summary ###" - -for ok in $aport_ok; do - msg "$ok: build succesfully" -done - -for na in $aport_na; do - msg "$na: disabled for $CI_ALPINE_TARGET_ARCH" yellow -done - -for ng in $aport_ng; do - msg "$ng: build failed" red - failed=true -done -section_end summary - -if [ "$failed" = true ]; then - exit 1 -elif [ -z "$aport_ok" ]; then - msg "No packages found to be built." yellow -fi diff --git a/.gitlab/bin/changed-aports b/.gitlab/bin/changed-aports deleted file mode 100755 index 4541230..0000000 --- a/.gitlab/bin/changed-aports +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh - -if [ $# -lt 1 ]; then - echo "Usage: $0 " - exit 1 -fi - -if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then - echo "Fatal: not inside a git repository" - exit 2 -fi - -basebranch=$1 - -if ! 
git rev-parse --verify --quiet $basebranch >/dev/null; then - # The base branch does not eixst, probably due to a shallow clone - git fetch -v $CI_MERGE_REQUEST_PROJECT_URL.git +refs/heads/$basebranch:refs/heads/$basebranch -fi - -git --no-pager diff --diff-filter=ACMR --name-only $basebranch...HEAD -- "*/APKBUILD" | xargs -r -n1 dirname diff --git a/.gitlab/bin/functions.sh b/.gitlab/bin/functions.sh deleted file mode 100755 index 44de1fe..0000000 --- a/.gitlab/bin/functions.sh +++ /dev/null @@ -1,74 +0,0 @@ -# shellcheck disable=SC3043 - -: - -# shellcheck disable=SC3040 -set -eu -o pipefail - -changed_repos() { - : "${APORTSDIR?APORTSDIR missing}" - : "${BASEBRANCH?BASEBRANCH missing}" - - cd "$APORTSDIR" - for repo in $REPOS; do - git diff --diff-filter=ACMR --exit-code "$BASEBRANCH"...HEAD -- "$repo" >/dev/null \ - || echo "$repo" - done -} - -changed_aports() { - : "${APORTSDIR?APORTSDIR missing}" - : "${BASEBRANCH?BASEBRANCH missing}" - - cd "$APORTSDIR" - local repo="$1" - local aports - - aports=$(git diff --name-only --diff-filter=ACMR --relative="$repo" \ - "$BASEBRANCH"...HEAD -- "*/APKBUILD" | xargs -rn1 dirname) - - # shellcheck disable=2086 - ap builddirs -d "$APORTSDIR/$repo" $aports 2>/dev/null | xargs -rn1 basename -} - -section_start() { - name=${1?arg 1 name missing} - header=${2?arg 2 header missing} - collapsed=$2 - timestamp=$(date +%s) - - options="" - case $collapsed in - yes|on|collapsed|true) options="[collapsed=true]";; - esac - - printf "\e[0Ksection_start:%d:%s%s\r\e[0K%s\n" "$timestamp" "$name" "$options" "$header" -} - -section_end() { - name=$1 - timestamp=$(date +%s) - - printf "\e[0Ksection_end:%d:%s\r\e[0K" "$timestamp" "$name" -} - -gitlab_key_to_rsa() { - KEY=$1 - TYPE=$2 - TGT=$3 - TGT_DIR=${TGT%/*} - if [ "$TGT" == "$TGT_DIR" ]; then - TGT_DIR="./" - fi - if [ ! -d "$TGT_DIR" ]; then - mkdir -p "$TGT_DIR" - fi - case $TYPE in - rsa-public) local type="PUBLIC";; - rsa-private) local type="RSA PRIVATE";; - esac - echo "-----BEGIN $type KEY-----" > "$TGT" - echo $1 | sed 's/.\{64\}/&\ -/g' >> "$TGT" - echo "-----END $type KEY-----" >> "$TGT" -} diff --git a/.gitlab/bin/lint b/.gitlab/bin/lint deleted file mode 100755 index c1edcfb..0000000 --- a/.gitlab/bin/lint +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/sh - -BLUE="\e[34m" -MAGENTA="\e[35m" -RESET="\e[0m" - -readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - -verbose() { - echo "> " "$@" - # shellcheck disable=SC2068 - $@ -} - -debugging() { - [ -n "$CI_DEBUG_BUILD" ] -} - -debug() { - if debugging; then - verbose "$@" - fi -} - -# git no longer allows to execute in repositories owned by different users -sudo chown -R gitlab-runner: . 
- -fetch_flags="-qn" -debugging && fetch_flags="-v" - -git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \ - "+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH" - -if debugging; then - merge_base=$(git merge-base "$BASEBRANCH" HEAD) - echo "$merge_base" - git --version - git config -l - git tag merge-base "$merge_base" || { echo "Could not determine merge-base"; exit 50; } - git log --oneline --graph --decorate --all -fi - -has_problems=0 - -for PKG in $(changed-aports "$BASEBRANCH"); do - printf "$BLUE==>$RESET Linting $PKG\n" - - ( - cd "$PKG" - - repo=$(basename $(dirname $PKG)); - - if [ "$repo" == "backports" ]; then - echo "Skipping $PKG as backports (we don't care)" - continue - fi - - printf "\n\n" - printf "$BLUE" - printf '======================================================\n' - printf " parse APKBUILD:\n" - printf '======================================================' - printf "$RESET\n\n" - ( . ./APKBUILD ) || has_problems=1 - - printf "\n\n" - printf "$BLUE" - printf '======================================================\n' - printf " abuild sanitycheck:\n" - printf '======================================================' - printf "$RESET\n\n" - abuild sanitycheck || has_problems=1 - - printf "\n\n" - printf "$BLUE" - printf '======================================================\n' - printf " apkbuild-shellcheck:\n" - printf '======================================================' - printf "$RESET\n" - apkbuild-shellcheck || has_problems=1 - - printf "\n\n" - printf "$BLUE" - printf '======================================================\n' - printf " apkbuild-lint:\n" - printf '======================================================' - printf "$RESET\n\n" - apkbuild-lint APKBUILD || has_problems=1 - - return $has_problems - ) || has_problems=1 - - echo -done - -exit $has_problems diff --git a/.gitlab/bin/push.sh b/.gitlab/bin/push.sh deleted file mode 100755 index c425216..0000000 --- a/.gitlab/bin/push.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/sh - -# shellcheck disable=SC3043 - -. $CI_PROJECT_DIR/.gitlab/bin/functions.sh - -# shellcheck disable=SC3040 -set -eu -o pipefail - -readonly APORTSDIR=$CI_PROJECT_DIR -readonly REPOS="ilot" -readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - -export GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - -gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa -gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub -gitlab_key_to_rsa $SSH_KEY rsa-private $HOME/.ssh/id_rsa -chmod 700 "$HOME"/.ssh/id_rsa -chmod 700 "$HOME"/.abuild/$ABUILD_KEY_NAME.rsa - -echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" > $HOME/.abuild/abuild.conf -echo "REPODEST=$HOME/apk" >> $HOME/.abuild/abuild.conf -sudo cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/. 
- -if [ -d $HOME/apk ]; then - git -C $HOME/apk fetch - git -C $HOME/apk checkout $BASEBRANCH - git -C $HOME/apk pull --rebase -else - git clone git@lab.ilot.io:ilot/apk -b $BASEBRANCH $HOME/apk -fi - -for i in $(find packages -type f -name "*.apk"); do - install -vDm644 $i ${i/packages/$HOME\/apk} -done - -fetch_flags="-qn" -git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \ - "+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH" - -for repo in $(changed_repos); do - rm $HOME/apk/$repo/*/APKINDEX.tar.gz | true - mkdir -p $repo/DUMMY - echo "pkgname=DUMMY" > $repo/DUMMY/APKBUILD - cd $repo/DUMMY - for i in $(find $HOME/apk/$repo -maxdepth 1 -mindepth 1 -printf '%P '); do - CHOST=$i abuild index - done - cd "$CI_PROJECT_DIR" - rm -R $repo/DUMMY -done - -git -C $HOME/apk add . -git -C $HOME/apk commit -m "Update from $CI_MERGE_REQUEST_IID - $CI_MERGE_REQUEST_TITLE" -git -C $HOME/apk push diff --git a/.gitlab/patches/abuild-cross.patch b/.gitlab/patches/abuild-cross.patch deleted file mode 100644 index 50afd23..0000000 --- a/.gitlab/patches/abuild-cross.patch +++ /dev/null @@ -1,17 +0,0 @@ -diff --git a/usr/bin/abuild.orig b/usr/bin/abuild -index 71e0681..d4ae3dd 100755 ---- a/usr/bin/abuild.orig -+++ b/usr/bin/abuild -@@ -2231,7 +2231,11 @@ calcdeps() { - list_has $i $builddeps && continue - subpackages_has ${i%%[<>=]*} || builddeps="$builddeps $i" - done -- hostdeps="$EXTRADEPENDS_TARGET" -+ for i in $EXTRADEPENDS_HOST $EXTRADEPENDS_TARGET $depends $makedepends; do -+ [ "$pkgname" = "${i%%[<>=]*}" ] && continue -+ list_has $i $hostdeps && continue -+ subpackages_has ${i%%[<>=]*} || hostdeps="$hostdeps $i" -+ done - fi - } - From e97ff811f06534014683d569eeaaa93956a71274 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sat, 24 Aug 2024 21:10:16 -0400 Subject: [PATCH 28/38] forgejo-ci: initial --- .forgejo/bin/deploy.sh | 34 ++++++++++++++ .forgejo/patches/build.patch | 66 +++++++++++++++++++++++++++ .forgejo/workflows/build-aarch64.yaml | 57 +++++++++++++++++++++++ .forgejo/workflows/build-x86_64.yaml | 57 +++++++++++++++++++++++ .forgejo/workflows/lint.yaml | 21 +++++++++ 5 files changed, 235 insertions(+) create mode 100755 .forgejo/bin/deploy.sh create mode 100644 .forgejo/patches/build.patch create mode 100644 .forgejo/workflows/build-aarch64.yaml create mode 100644 .forgejo/workflows/build-x86_64.yaml create mode 100644 .forgejo/workflows/lint.yaml diff --git a/.forgejo/bin/deploy.sh b/.forgejo/bin/deploy.sh new file mode 100755 index 0000000..e8e811c --- /dev/null +++ b/.forgejo/bin/deploy.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +# shellcheck disable=SC3040 +set -eu -o pipefail + +readonly REPOS="backports ilot" +readonly BASEBRANCH=$GITHUB_BASE_REF +readonly TARGET_REPO=$CI_ALPINE_REPO + +apkgs=$(find package -type f -name "*.apk") + +for apk in $apkgs; do + branch=$(echo $apk | awk -F '/' '{print $2}') + arch=$(echo $apk | awk -F '/' '{print $3}') + name=$(echo $apk | awk -F '/' '{print $4}') + + if [ "$(curl -s $GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/pulls/$GITHUB_EVENT_NUMBER | jq .draft)" == "true" ]; then + # if draft, send to -testing branch + branch="$branch-testing" + else + # if not draft, assume that this was sent to $branch-testing and nuke it + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch-testing/$arch/$name + fi + + echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch" + return=$(curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch 2>&1) + echo $return 
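+	# the Forgejo packages API answers "package file already exists" when a
+	# duplicate is uploaded; in that case delete the old file and upload
+	# again so the package is refreshed in place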
+ if [ "$return" == "package file already exists" ]; then + echo "Package already exists, refreshing..." + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$arch/$name + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch + fi +done + diff --git a/.forgejo/patches/build.patch b/.forgejo/patches/build.patch new file mode 100644 index 0000000..6de7c04 --- /dev/null +++ b/.forgejo/patches/build.patch @@ -0,0 +1,66 @@ +diff --git a/usr/local/bin/build.sh.orig b/usr/local/bin/build.sh +old mode 100644 +new mode 100755 +index c3b8f7a..f609018 +--- a/usr/local/bin/build.sh.orig ++++ b/usr/local/bin/build.sh +@@ -7,13 +7,15 @@ + set -eu -o pipefail + + readonly APORTSDIR=$CI_PROJECT_DIR +-readonly REPOS="main community testing non-free" ++readonly REPOS="ilot backports" ++readonly ALPINE_REPOS="main community testing" + readonly ARCH=$(apk --print-arch) + # gitlab variables + readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME + + : "${REPODEST:=$HOME/packages}" +-: "${MIRROR:=https://dl-cdn.alpinelinux.org/alpine}" ++: "${MIRROR:=https://forge.ilot.io/api/packages/ilot/alpine}" ++: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" + : "${MAX_ARTIFACT_SIZE:=300000000}" #300M + : "${CI_DEBUG_BUILD:=}" + +@@ -68,8 +70,8 @@ report() { + + get_release() { + case $BASEBRANCH in +- *-stable) echo v"${BASEBRANCH%-*}";; +- master) echo edge;; ++ v*) echo "$BASEBRANCH";; ++ edge) echo edge;; + *) die "Branch \"$BASEBRANCH\" not supported!" + esac + } +@@ -101,11 +103,11 @@ set_repositories_for() { + release=$(get_release) + for repo in $REPOS; do + [ "$repo" = "non-free" ] && continue +- [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue ++ [ "$release" == "edge" ] && [ "$repo" == "backports" ] && continue + repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" + [ "$repo" = "$target_repo" ] && break + done +- doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" ++ doas sh -c "printf '%s\n' $repos >> /etc/apk/repositories" + doas apk update + } + +@@ -118,7 +120,15 @@ apply_offset_limit() { + } + + setup_system() { +- doas sh -c "echo $MIRROR/$(get_release)/main > /etc/apk/repositories" ++ local repos='' repo='' ++ local release ++ ++ release=$(get_release) ++ for repo in $ALPINE_REPOS; do ++ [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue ++ repos="$repos $ALPINE_MIRROR/$release/$repo" ++ done ++ doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" + doas apk -U upgrade -a || apk fix || die "Failed to up/downgrade system" + abuild-keygen -ain + doas sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf diff --git a/.forgejo/workflows/build-aarch64.yaml b/.forgejo/workflows/build-aarch64.yaml new file mode 100644 index 0000000..b1fe4fe --- /dev/null +++ b/.forgejo/workflows/build-aarch64.yaml @@ -0,0 +1,57 @@ +on: + pull_request: + types: [ assigned, opened, synchronize, reopened ] + +concurrency: + group: ${{ github.head_ref || github.ref_name }} + cancel-in-progress: true + +jobs: + build-aarch64: + runs-on: aarch64 + container: + image: alpinelinux/alpine-gitlab-ci:latest + env: + CI_PROJECT_DIR: ${{ github.workspace }} + CI_DEBUG_BUILD: ${{ runner.debug }} + CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }} + CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} + steps: + - name: Environment setup + run: | + doas apk add nodejs git patch curl + cd /etc/apk/keys + doas curl -JO 
https://forge.ilot.io/api/packages/ilot/alpine/key + - name: Repo pull + uses: actions/checkout@v4 + with: + fetch-depth: 500 + - name: Package build + run: | + doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch + build.sh + - name: Package upload + uses: forgejo/upload-artifact@v3 + with: + name: package + path: packages + + deploy-aarch64: + needs: [build-aarch64] + runs-on: aarch64 + container: + image: alpine:latest + env: + CI_ALPINE_REPO: 'https://forge.ilot.io/api/packages/ilot/alpine' + FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }} + FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }} + GITHUB_EVENT_NUMBER: ${{ github.event.number }} + steps: + - name: Setting up environment + run: apk add nodejs curl findutils git gawk jq + - name: Repo pull + uses: actions/checkout@v4 + - name: Package download + uses: forgejo/download-artifact@v3 + - name: Package deployment + run: ${{ github.workspace }}/.forgejo/bin/deploy.sh diff --git a/.forgejo/workflows/build-x86_64.yaml b/.forgejo/workflows/build-x86_64.yaml new file mode 100644 index 0000000..2725613 --- /dev/null +++ b/.forgejo/workflows/build-x86_64.yaml @@ -0,0 +1,57 @@ +on: + pull_request: + types: [ assigned, opened, synchronize, reopened ] + +concurrency: + group: ${{ github.head_ref || github.ref_name }} + cancel-in-progress: true + +jobs: + build-x86_64: + runs-on: x86_64 + container: + image: alpinelinux/alpine-gitlab-ci:latest + env: + CI_PROJECT_DIR: ${{ github.workspace }} + CI_DEBUG_BUILD: ${{ runner.debug }} + CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }} + CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} + steps: + - name: Environment setup + run: | + doas apk add nodejs git patch curl + cd /etc/apk/keys + doas curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key + - name: Repo pull + uses: actions/checkout@v4 + with: + fetch-depth: 500 + - name: Package build + run: | + doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch + build.sh + - name: Package upload + uses: forgejo/upload-artifact@v3 + with: + name: package + path: packages + + deploy-x86_64: + needs: [build-x86_64] + runs-on: x86_64 + container: + image: alpine:latest + env: + CI_ALPINE_REPO: 'https://forge.ilot.io/api/packages/ilot/alpine' + FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }} + FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }} + GITHUB_EVENT_NUMBER: ${{ github.event.number }} + steps: + - name: Setting up environment + run: apk add nodejs curl findutils git gawk jq + - name: Repo pull + uses: actions/checkout@v4 + - name: Package download + uses: forgejo/download-artifact@v3 + - name: Package deployment + run: ${{ github.workspace }}/.forgejo/bin/deploy.sh diff --git a/.forgejo/workflows/lint.yaml b/.forgejo/workflows/lint.yaml new file mode 100644 index 0000000..3614deb --- /dev/null +++ b/.forgejo/workflows/lint.yaml @@ -0,0 +1,21 @@ +on: + pull_request: + types: [ assigned, opened, synchronize, reopened ] + +jobs: + lint: + run-name: lint + runs-on: x86_64 + container: + image: alpinelinux/apkbuild-lint-tools:latest + env: + CI_PROJECT_DIR: ${{ github.workspace }} + CI_DEBUG_BUILD: ${{ runner.debug }} + CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }} + CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} + steps: + - run: doas apk add nodejs git + - uses: actions/checkout@v4 + with: + fetch-depth: 500 + - run: lint From 15acc988544874da70f57c85198cff2e942a945a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sat, 
24 Aug 2024 21:12:45 -0400 Subject: [PATCH 29/38] ilot/*: bump pkgrel, disable peertube and loomio --- ilot/authentik/APKBUILD | 2 +- ilot/freescout/APKBUILD | 2 +- ilot/listmonk/APKBUILD | 2 +- ilot/loomio/APKBUILD | 5 +++-- ilot/peertube/APKBUILD | 5 +++-- ilot/php82-pecl-inotify/APKBUILD | 2 +- ilot/php83-pecl-inotify/APKBUILD | 2 +- ilot/py3-django-rest-framework/APKBUILD | 2 +- ilot/py3-django-tenants/APKBUILD | 2 +- ilot/py3-scim2-filter-parser/APKBUILD | 2 +- ilot/py3-tenant-schemas-celery/APKBUILD | 2 +- ilot/uptime-kuma/APKBUILD | 2 +- ilot/wikijs/APKBUILD | 3 +-- 13 files changed, 17 insertions(+), 16 deletions(-) diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD index d10a575..72e65ad 100644 --- a/ilot/authentik/APKBUILD +++ b/ilot/authentik/APKBUILD @@ -2,7 +2,7 @@ # Maintainer: Antoine Martin (ayakael) pkgname=authentik pkgver=2024.4.3 -pkgrel=1 +pkgrel=2 pkgdesc="An open-source Identity Provider focused on flexibility and versatility" url="https://github.com/goauthentik/authentik" # s390x: missing py3-celery py3-flower and py3-kombu diff --git a/ilot/freescout/APKBUILD b/ilot/freescout/APKBUILD index 1fd520c..d083ae2 100644 --- a/ilot/freescout/APKBUILD +++ b/ilot/freescout/APKBUILD @@ -2,7 +2,7 @@ # Contributor: Antoine Martin (ayakael) pkgname=freescout pkgver=1.8.139 -pkgrel=0 +pkgrel=1 pkgdesc="Free self-hosted help desk & shared mailbox" arch="noarch" url="freescout.net" diff --git a/ilot/listmonk/APKBUILD b/ilot/listmonk/APKBUILD index 00951f0..0ad6acd 100644 --- a/ilot/listmonk/APKBUILD +++ b/ilot/listmonk/APKBUILD @@ -2,7 +2,7 @@ # Maintainer: Antoine Martin (ayakael) pkgname=listmonk pkgver=3.0.0 -pkgrel=0 +pkgrel=1 pkgdesc='Self-hosted newsletter and mailing list manager with a modern dashboard' arch="all" url=https://listmonk.app diff --git a/ilot/loomio/APKBUILD b/ilot/loomio/APKBUILD index 38eb631..d0f99c6 100644 --- a/ilot/loomio/APKBUILD +++ b/ilot/loomio/APKBUILD @@ -4,10 +4,11 @@ pkgname=loomio pkgver=2.21.4 _gittag=v$pkgver -pkgrel=0 +pkgrel=1 pkgdesc="A collaborative decision making tool" url="https://github.com/loomio/loomio" -arch="x86_64" +# build failure +#arch="x86_64" license="MIT" depends=" postgresql diff --git a/ilot/peertube/APKBUILD b/ilot/peertube/APKBUILD index f50d1c1..1c240ed 100644 --- a/ilot/peertube/APKBUILD +++ b/ilot/peertube/APKBUILD @@ -2,9 +2,10 @@ # Contributor: Antoine Martin (ayakael) pkgname=peertube pkgver=6.0.2 -pkgrel=0 +pkgrel=1 pkgdesc="ActivityPub-federated video streaming platform using P2P directly in your web browser" -arch="x86_64" +# build failure +#arch="x86_64" url="https://joinpeertube.org/" license="AGPL" depends=" diff --git a/ilot/php82-pecl-inotify/APKBUILD b/ilot/php82-pecl-inotify/APKBUILD index 44903a1..d2bb518 100644 --- a/ilot/php82-pecl-inotify/APKBUILD +++ b/ilot/php82-pecl-inotify/APKBUILD @@ -3,7 +3,7 @@ pkgname=php82-pecl-inotify _extname=inotify pkgver=3.0.0 -pkgrel=0 +pkgrel=1 pkgdesc="Inotify bindings for PHP 8.3" url="https://pecl.php.net/package/inotify" arch="all" diff --git a/ilot/php83-pecl-inotify/APKBUILD b/ilot/php83-pecl-inotify/APKBUILD index 771466f..48f2bbf 100644 --- a/ilot/php83-pecl-inotify/APKBUILD +++ b/ilot/php83-pecl-inotify/APKBUILD @@ -3,7 +3,7 @@ pkgname=php83-pecl-inotify _extname=inotify pkgver=3.0.0 -pkgrel=0 +pkgrel=1 pkgdesc="Inotify bindings for PHP 8.3" url="https://pecl.php.net/package/inotify" arch="all" diff --git a/ilot/py3-django-rest-framework/APKBUILD b/ilot/py3-django-rest-framework/APKBUILD index 4a82cb3..82a1497 100644 --- 
a/ilot/py3-django-rest-framework/APKBUILD +++ b/ilot/py3-django-rest-framework/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-django-rest-framework _pkgname=django-rest-framework pkgver=3.14.0 -pkgrel=0 +pkgrel=1 pkgdesc="Web APIs for Django" url="https://github.com/encode/django-rest-framework" arch="noarch" diff --git a/ilot/py3-django-tenants/APKBUILD b/ilot/py3-django-tenants/APKBUILD index f12eac2..0183781 100644 --- a/ilot/py3-django-tenants/APKBUILD +++ b/ilot/py3-django-tenants/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-django-tenants #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=django-tenants pkgver=3.6.1 -pkgrel=0 +pkgrel=1 pkgdesc="Tenant support for Django using PostgreSQL schemas." url="https://pypi.python.org/project/django-tenants" arch="noarch" diff --git a/ilot/py3-scim2-filter-parser/APKBUILD b/ilot/py3-scim2-filter-parser/APKBUILD index 784a660..15d12e5 100644 --- a/ilot/py3-scim2-filter-parser/APKBUILD +++ b/ilot/py3-scim2-filter-parser/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-scim2-filter-parser #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=scim2-filter-parser pkgver=0.5.0 -pkgrel=0 +pkgrel=1 pkgdesc="A customizable parser/transpiler for SCIM2.0 filters" url="https://pypi.python.org/project/scim2-filter-parser" arch="noarch" diff --git a/ilot/py3-tenant-schemas-celery/APKBUILD b/ilot/py3-tenant-schemas-celery/APKBUILD index 4398eae..c5f9029 100644 --- a/ilot/py3-tenant-schemas-celery/APKBUILD +++ b/ilot/py3-tenant-schemas-celery/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-tenant-schemas-celery #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=tenant-schemas-celery pkgver=2.2.0 -pkgrel=0 +pkgrel=1 pkgdesc="Celery integration for django-tenant-schemas and django-tenants" url="https://pypi.python.org/project/tenant-schemas-celery" arch="noarch" diff --git a/ilot/uptime-kuma/APKBUILD b/ilot/uptime-kuma/APKBUILD index cea07d0..f0acb67 100644 --- a/ilot/uptime-kuma/APKBUILD +++ b/ilot/uptime-kuma/APKBUILD @@ -2,7 +2,7 @@ # Maintainer: Antoine Martin (ayakael) pkgname=uptime-kuma pkgver=1.23.13 -pkgrel=0 +pkgrel=1 pkgdesc='A fancy self-hosted monitoring tool' arch="all" url="https://github.com/louislam/uptime-kuma" diff --git a/ilot/wikijs/APKBUILD b/ilot/wikijs/APKBUILD index aeaad93..43d8189 100644 --- a/ilot/wikijs/APKBUILD +++ b/ilot/wikijs/APKBUILD @@ -1,9 +1,8 @@ # Maintainer: Antoine Martin (ayakael) # Contributor: Antoine Martin (ayakael) - pkgname=wikijs pkgver=2.5.303 -pkgrel=0 +pkgrel=1 pkgdesc="Wiki.js | A modern, lightweight and powerful wiki app built on Node.js" license="AGPL-3.0" arch="!armv7 x86_64" From 03803dabaed5cb33812a82bd745e0b10d8641ca0 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sat, 24 Aug 2024 21:23:33 -0400 Subject: [PATCH 30/38] forgejo-ci: build.sh is now local rather than patched --- .forgejo/bin/build.sh | 260 ++++++++++++++++++++++++++ .forgejo/patches/build.patch | 66 ------- .forgejo/workflows/build-aarch64.yaml | 4 +- .forgejo/workflows/build-x86_64.yaml | 4 +- 4 files changed, 262 insertions(+), 72 deletions(-) create mode 100755 .forgejo/bin/build.sh delete mode 100644 .forgejo/patches/build.patch diff --git a/.forgejo/bin/build.sh b/.forgejo/bin/build.sh new file mode 100755 index 0000000..b6dcbe0 --- /dev/null +++ b/.forgejo/bin/build.sh @@ -0,0 +1,260 @@ +#!/bin/sh +# shellcheck disable=SC3043 + +. 
/usr/local/lib/functions.sh + +# shellcheck disable=SC3040 +set -eu -o pipefail + +readonly APORTSDIR=$CI_PROJECT_DIR +readonly REPOS="ilot backports" +readonly ALPINE_REPOS="main community testing" +readonly ARCH=$(apk --print-arch) +# gitlab variables +readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME + +: "${REPODEST:=$HOME/packages}" +: "${MIRROR:=https://forge.ilot.io/api/packages/ilot/alpine}" +: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" +: "${MAX_ARTIFACT_SIZE:=300000000}" #300M +: "${CI_DEBUG_BUILD:=}" + +: "${CI_ALPINE_BUILD_OFFSET:=0}" +: "${CI_ALPINE_BUILD_LIMIT:=9999}" + +msg() { + local color=${2:-green} + case "$color" in + red) color="31";; + green) color="32";; + yellow) color="33";; + blue) color="34";; + *) color="32";; + esac + printf "\033[1;%sm>>>\033[1;0m %s\n" "$color" "$1" | xargs >&2 +} + +verbose() { + echo "> " "$@" + # shellcheck disable=SC2068 + $@ +} + +debugging() { + [ -n "$CI_DEBUG_BUILD" ] +} + +debug() { + if debugging; then + verbose "$@" + fi +} + +die() { + msg "$1" red + exit 1 +} + +capture_stderr() { + "$@" 2>&1 +} + +report() { + report=$1 + + reportsdir=$APORTSDIR/logs/ + mkdir -p "$reportsdir" + + tee -a "$reportsdir/$report.log" +} + +get_release() { + case $BASEBRANCH in + v*) echo "$BASEBRANCH";; + edge) echo edge;; + *) die "Branch \"$BASEBRANCH\" not supported!" + esac +} + +build_aport() { + local repo="$1" aport="$2" + cd "$APORTSDIR/$repo/$aport" + if abuild -r 2>&1 | report "build-$aport"; then + checkapk 2>&1 | report "checkapk-$aport" || true + aport_ok="$aport_ok $repo/$aport" + else + aport_ng="$aport_ng $repo/$aport" + fi +} + +check_aport() { + local repo="$1" aport="$2" + cd "$APORTSDIR/$repo/$aport" + if ! abuild check_arch 2>/dev/null; then + aport_na="$aport_na $repo/$aport" + return 1 + fi +} + +set_repositories_for() { + local target_repo="$1" repos='' repo='' + local release + + release=$(get_release) + for repo in $REPOS; do + [ "$repo" = "non-free" ] && continue + [ "$release" == "edge" ] && [ "$repo" == "backports" ] && continue + repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" + [ "$repo" = "$target_repo" ] && break + done + doas sh -c "printf '%s\n' $repos >> /etc/apk/repositories" + doas apk update || true +} + +apply_offset_limit() { + start=$1 + limit=$2 + end=$((start+limit)) + + sed -n "$((start+1)),${end}p" +} + +setup_system() { + local repos='' repo='' + local release + + release=$(get_release) + for repo in $ALPINE_REPOS; do + [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue + repos="$repos $ALPINE_MIRROR/$release/$repo" + done + doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" + doas apk -U upgrade -a || apk fix || die "Failed to up/downgrade system" + abuild-keygen -ain + doas sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf + ( . 
/etc/abuild.conf && echo "Building with $JOBS jobs" )
+	mkdir -p "$REPODEST"
+	git config --global init.defaultBranch master
+}
+
+sysinfo() {
+	printf ">>> Host system information (arch: %s, release: %s) <<<\n" "$ARCH" "$(get_release)"
+	printf "- Number of Cores: %s\n" "$(nproc)"
+	printf "- Memory: %s Gb\n" "$(awk '/^MemTotal/ {print ($2/1024/1024)}' /proc/meminfo)"
+	printf "- Free space: %s\n" "$(df -hP / | awk '/\/$/ {print $4}')"
+}
+
+copy_artifacts() {
+	cd "$APORTSDIR"
+
+	packages_size="$(du -sk "$REPODEST" | awk '{print $1 * 1024}')"
+	if [ -z "$packages_size" ]; then
+		return
+	fi
+
+	echo "Artifact size: $packages_size bytes"
+
+	mkdir -p keys/ packages/
+
+	if [ "$packages_size" -lt $MAX_ARTIFACT_SIZE ]; then
+		msg "Copying packages for artifact upload"
+		cp -ar "$REPODEST"/* packages/ 2>/dev/null
+		cp ~/.abuild/*.rsa.pub keys/
+	else
+		msg "Artifact size $packages_size larger than max ($MAX_ARTIFACT_SIZE), skipping upload" yellow
+	fi
+}
+
+section_start setup "Setting up the system" collapse
+
+if debugging; then
+	set -x
+fi
+
+aport_ok=
+aport_na=
+aport_ng=
+failed=
+
+sysinfo || true
+setup_system || die "Failed to set up system"
+
+# git no longer allows running in repositories owned by a different user
+doas chown -R buildozer: .
+
+fetch_flags="-qn"
+debugging && fetch_flags="-v"
+
+git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
+	"+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH"
+
+if debugging; then
+	merge_base=$(git merge-base "$BASEBRANCH" HEAD) || echo "Could not determine merge-base"
+	echo "Merge base: $merge_base"
+	git --version
+	git config -l
+	[ -n "$merge_base" ] && git tag -f merge-base "$merge_base"
+	git --no-pager log -200 --oneline --graph --decorate --all
+fi
+
+section_end setup
+
+build_start=$CI_ALPINE_BUILD_OFFSET
+build_limit=$CI_ALPINE_BUILD_LIMIT
+
+for repo in $(changed_repos); do
+	set_repositories_for "$repo"
+	built_aports=0
+	changed_aports_in_repo=$(changed_aports "$repo")
+	changed_aports_in_repo_count=$(echo "$changed_aports_in_repo" | wc -l)
+	changed_aports_to_build=$(echo "$changed_aports_in_repo" | apply_offset_limit "$build_start" "$build_limit")
+
+	msg "Changed aports in $repo:"
+	# shellcheck disable=SC2086 # Splitting is expected here
+	printf " - %s\n" $changed_aports_to_build
+	for pkgname in $changed_aports_to_build; do
+		section_start "build_$pkgname" "Building package $pkgname"
+		built_aports=$((built_aports+1))
+		if check_aport "$repo" "$pkgname"; then
+			build_aport "$repo" "$pkgname"
+		fi
+		section_end "build_$pkgname"
+	done
+
+	build_start=$((build_start-(changed_aports_in_repo_count-built_aports)))
+	build_limit=$((build_limit-built_aports))
+
+	if [ $build_limit -le 0 ]; then
+		msg "Limit reached, breaking"
+		break
+	fi
+done
+
+section_start artifacts "Handling artifacts" collapse
+copy_artifacts || true
+section_end artifacts
+
+section_start summary "Build summary"
+
+echo "### Build summary ###"
+
+for ok in $aport_ok; do
+	msg "$ok: built successfully"
+done
+
+for na in $aport_na; do
+	msg "$na: disabled for $ARCH" yellow
+done
+
+for ng in $aport_ng; do
+	msg "$ng: build failed" red
+	failed=true
+done
+section_end summary
+
+if [ "$failed" = true ]; then
+	exit 1
+elif [ -z "$aport_ok" ]; then
+	msg "No packages found to be built."
yellow +fi + diff --git a/.forgejo/patches/build.patch b/.forgejo/patches/build.patch deleted file mode 100644 index 6de7c04..0000000 --- a/.forgejo/patches/build.patch +++ /dev/null @@ -1,66 +0,0 @@ -diff --git a/usr/local/bin/build.sh.orig b/usr/local/bin/build.sh -old mode 100644 -new mode 100755 -index c3b8f7a..f609018 ---- a/usr/local/bin/build.sh.orig -+++ b/usr/local/bin/build.sh -@@ -7,13 +7,15 @@ - set -eu -o pipefail - - readonly APORTSDIR=$CI_PROJECT_DIR --readonly REPOS="main community testing non-free" -+readonly REPOS="ilot backports" -+readonly ALPINE_REPOS="main community testing" - readonly ARCH=$(apk --print-arch) - # gitlab variables - readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - - : "${REPODEST:=$HOME/packages}" --: "${MIRROR:=https://dl-cdn.alpinelinux.org/alpine}" -+: "${MIRROR:=https://forge.ilot.io/api/packages/ilot/alpine}" -+: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" - : "${MAX_ARTIFACT_SIZE:=300000000}" #300M - : "${CI_DEBUG_BUILD:=}" - -@@ -68,8 +70,8 @@ report() { - - get_release() { - case $BASEBRANCH in -- *-stable) echo v"${BASEBRANCH%-*}";; -- master) echo edge;; -+ v*) echo "$BASEBRANCH";; -+ edge) echo edge;; - *) die "Branch \"$BASEBRANCH\" not supported!" - esac - } -@@ -101,11 +103,11 @@ set_repositories_for() { - release=$(get_release) - for repo in $REPOS; do - [ "$repo" = "non-free" ] && continue -- [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue -+ [ "$release" == "edge" ] && [ "$repo" == "backports" ] && continue - repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" - [ "$repo" = "$target_repo" ] && break - done -- doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" -+ doas sh -c "printf '%s\n' $repos >> /etc/apk/repositories" - doas apk update - } - -@@ -118,7 +120,15 @@ apply_offset_limit() { - } - - setup_system() { -- doas sh -c "echo $MIRROR/$(get_release)/main > /etc/apk/repositories" -+ local repos='' repo='' -+ local release -+ -+ release=$(get_release) -+ for repo in $ALPINE_REPOS; do -+ [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue -+ repos="$repos $ALPINE_MIRROR/$release/$repo" -+ done -+ doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" - doas apk -U upgrade -a || apk fix || die "Failed to up/downgrade system" - abuild-keygen -ain - doas sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf diff --git a/.forgejo/workflows/build-aarch64.yaml b/.forgejo/workflows/build-aarch64.yaml index b1fe4fe..8e19c74 100644 --- a/.forgejo/workflows/build-aarch64.yaml +++ b/.forgejo/workflows/build-aarch64.yaml @@ -27,9 +27,7 @@ jobs: with: fetch-depth: 500 - name: Package build - run: | - doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch - build.sh + run: ${{ github.workspace }}/.forgejo/bin/build.sh - name: Package upload uses: forgejo/upload-artifact@v3 with: diff --git a/.forgejo/workflows/build-x86_64.yaml b/.forgejo/workflows/build-x86_64.yaml index 2725613..9a7dac2 100644 --- a/.forgejo/workflows/build-x86_64.yaml +++ b/.forgejo/workflows/build-x86_64.yaml @@ -27,9 +27,7 @@ jobs: with: fetch-depth: 500 - name: Package build - run: | - doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch - build.sh + run: ${{ github.workspace }}/.forgejo/bin/build.sh - name: Package upload uses: forgejo/upload-artifact@v3 with: From d985367f7b1b8dbc00a1105770a5294ecb2063f7 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 09:38:13 -0400 Subject: [PATCH 31/38] ilot/authentik: upgrade to 2024.4.4 
--- ilot/authentik/APKBUILD | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD index 72e65ad..615f078 100644 --- a/ilot/authentik/APKBUILD +++ b/ilot/authentik/APKBUILD @@ -1,8 +1,8 @@ # Contributor: Antoine Martin (ayakael) # Maintainer: Antoine Martin (ayakael) pkgname=authentik -pkgver=2024.4.3 -pkgrel=2 +pkgver=2024.4.4 +pkgrel=0 pkgdesc="An open-source Identity Provider focused on flexibility and versatility" url="https://github.com/goauthentik/authentik" # s390x: missing py3-celery py3-flower and py3-kombu @@ -247,7 +247,7 @@ package() { } sha512sums=" -121ed925d81a5cb2a14fed8ec8b324352e40b1fcbba83573bfdc1d1f66a91d9670cd64d7ef752c8a2df6c34fc3e19e8aec5c6752d33e87b487a462a590212ab0 authentik-2024.4.3.tar.gz +22c8ff16b93b9fcb84478b6476dd4f6413719037affc7756f20ba1dc3afff1fbaae2f1fc89d7b3a9c4372fcc856009d8a4ef5eb7854855e4528523fb456a2491 authentik-2024.4.4.tar.gz 4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1 authentik.openrc 6cb03b9b69df39bb4539fe05c966536314d766b2e9307a92d87070ba5f5b7e7ab70f1b5ee1ab3c0c50c23454f9c5a4caec29e63fdf411bbb7a124ad687569b89 authentik-worker.openrc 351e6920d987861f8bf0d7ab2f942db716a8dbdad1f690ac662a6ef29ac0fd46cf817cf557de08f1c024703503d36bc8b46f0d9eb1ecaeb399dce4c3bb527d17 authentik-ldap.openrc From 61ef8d893c037eed10d90d5673c532dcec48632c Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 09:42:24 -0400 Subject: [PATCH 32/38] ilot/py3-django-rest-framework: bump --- ilot/py3-django-rest-framework/APKBUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ilot/py3-django-rest-framework/APKBUILD b/ilot/py3-django-rest-framework/APKBUILD index 82a1497..69f9f63 100644 --- a/ilot/py3-django-rest-framework/APKBUILD +++ b/ilot/py3-django-rest-framework/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-django-rest-framework _pkgname=django-rest-framework pkgver=3.14.0 -pkgrel=1 +pkgrel=2 pkgdesc="Web APIs for Django" url="https://github.com/encode/django-rest-framework" arch="noarch" From c9dc783fcb66c849355021bab69eb5fc5c907086 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 09:11:54 -0400 Subject: [PATCH 33/38] backports/forgejo-runner: new aport --- backports/forgejo-runner/APKBUILD | 47 +++++++++++++++++++ backports/forgejo-runner/forgejo-runner.confd | 17 +++++++ backports/forgejo-runner/forgejo-runner.initd | 38 +++++++++++++++ .../forgejo-runner/forgejo-runner.logrotate | 5 ++ .../forgejo-runner/forgejo-runner.pre-install | 14 ++++++ .../forgejo-runner/forgejo-runner.pre-upgrade | 1 + 6 files changed, 122 insertions(+) create mode 100644 backports/forgejo-runner/APKBUILD create mode 100644 backports/forgejo-runner/forgejo-runner.confd create mode 100644 backports/forgejo-runner/forgejo-runner.initd create mode 100644 backports/forgejo-runner/forgejo-runner.logrotate create mode 100644 backports/forgejo-runner/forgejo-runner.pre-install create mode 120000 backports/forgejo-runner/forgejo-runner.pre-upgrade diff --git a/backports/forgejo-runner/APKBUILD b/backports/forgejo-runner/APKBUILD new file mode 100644 index 0000000..1005964 --- /dev/null +++ b/backports/forgejo-runner/APKBUILD @@ -0,0 +1,47 @@ +# Contributor: Patrycja Rosa +# Maintainer: Patrycja Rosa +pkgname=forgejo-runner +pkgver=3.5.0 +pkgrel=2 +pkgdesc="CI/CD job runner for Forgejo" +url="https://code.forgejo.org/forgejo/runner" +arch="all" +license="MIT" +makedepends="go" +install="$pkgname.pre-install 
$pkgname.pre-upgrade" +subpackages="$pkgname-openrc" +source="$pkgname-$pkgver.tar.gz::https://code.forgejo.org/forgejo/runner/archive/v$pkgver.tar.gz + + forgejo-runner.logrotate + forgejo-runner.initd + forgejo-runner.confd + " +builddir="$srcdir/runner" +options="!check" # tests require running forgejo + +build() { + go build \ + -o forgejo-runner \ + -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=$pkgver" + ./forgejo-runner generate-config > config.example.yaml +} + +check() { + go test ./... +} + +package() { + install -Dm755 forgejo-runner -t "$pkgdir"/usr/bin/ + install -Dm644 config.example.yaml -t "$pkgdir"/etc/forgejo-runner/ + + install -Dm755 "$srcdir"/forgejo-runner.initd "$pkgdir"/etc/init.d/forgejo-runner + install -Dm644 "$srcdir"/forgejo-runner.confd "$pkgdir"/etc/conf.d/forgejo-runner + install -Dm644 "$srcdir"/forgejo-runner.logrotate "$pkgdir"/etc/logrotate.d/forgejo-runner +} + +sha512sums=" +e78968a5f9b6e797fb759a5c8cbf46a5c2fef2083dabc88599c9017729faface963576c63a948b0add424cb267902e864fb1a1b619202660296976d93e670713 forgejo-runner-3.5.0.tar.gz +a3c7238b0c63053325d31e09277edd88690ef5260854517f82d9042d6173fb5d24ebfe36e1d7363673dd8801972638a6e69b6af8ad43debb6057515c73655236 forgejo-runner.logrotate +bb0c6fbe90109c77f9ef9cb0d35d20b8033be0e4b7a60839b596aa5528dfa24309ec894d8c04066bf8fb30143e63a5fd8cc6fc89aac364422b583e0f840e2da6 forgejo-runner.initd +e11eab27f88f1181112389befa7de3aa0bac7c26841861918707ede53335535425c805e6682e25704e9c8a6aecba3dc13e20900a99df1183762b012b62f26d5f forgejo-runner.confd +" diff --git a/backports/forgejo-runner/forgejo-runner.confd b/backports/forgejo-runner/forgejo-runner.confd new file mode 100644 index 0000000..874e695 --- /dev/null +++ b/backports/forgejo-runner/forgejo-runner.confd @@ -0,0 +1,17 @@ +# Configuration for /etc/init.d/forgejo-runner + +# Path to the config file (--config). +#cfgfile="/etc/forgejo-runner/config.yaml" + +# Path to the working directory (--working-directory). +#datadir="/var/lib/forgejo-runner" + +# Path to the log file where stdout/stderr will be redirected. +# Leave empty/commented out to use syslog instead. +#output_log="/var/log/forgejo-runner.log" + +# You may change this to root, e.g. to run jobs in LXC +#command_user="forgejo-runner" + +# Comment out to run without process supervisor. +supervisor=supervise-daemon diff --git a/backports/forgejo-runner/forgejo-runner.initd b/backports/forgejo-runner/forgejo-runner.initd new file mode 100644 index 0000000..c54acdd --- /dev/null +++ b/backports/forgejo-runner/forgejo-runner.initd @@ -0,0 +1,38 @@ +#!/sbin/openrc-run + +description="Forgejo CI Runner" +name="Forgejo Runner" + +: ${cfgfile:="/etc/forgejo-runner/config.yaml"} +: ${datadir:="/var/lib/forgejo-runner"} +: ${command_user:="forgejo-runner"} + +command="/usr/bin/forgejo-runner" +command_args="daemon --config $cfgfile" +command_background="yes" +directory="$datadir" +pidfile="/run/$RC_SVCNAME.pid" + +depend() { + need net + use dns logger +} + +start_pre() { + checkpath -d -o "$command_user" /etc/forgejo-runner + checkpath -d -o "$command_user" "$datadir" + + if ! [ -e "$cfgfile" ]; then + eerror "Config file $cfgfile doesn't exist." 
+		eerror "You can generate it with: forgejo-runner generate-config,"
+		eerror "or use the auto-generated one in /etc/forgejo-runner/config.example.yaml"
+		return 1
+	fi
+
+	if [ "$output_log" ]; then
+		error_log="$output_log"
+	else
+		output_logger="logger -t '${RC_SVCNAME}' -p daemon.info"
+		error_logger="logger -t '${RC_SVCNAME}' -p daemon.error"
+	fi
+}
diff --git a/backports/forgejo-runner/forgejo-runner.logrotate b/backports/forgejo-runner/forgejo-runner.logrotate
new file mode 100644
index 0000000..1a0539e
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.logrotate
@@ -0,0 +1,5 @@
+/var/log/forgejo-runner.log {
+	copytruncate
+	missingok
+	notifempty
+}
diff --git a/backports/forgejo-runner/forgejo-runner.pre-install b/backports/forgejo-runner/forgejo-runner.pre-install
new file mode 100644
index 0000000..5ce27be
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.pre-install
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+addgroup -S forgejo-runner 2>/dev/null
+adduser -S -D -H -h /var/lib/forgejo-runner -s /sbin/nologin -G forgejo-runner -g forgejo-runner forgejo-runner 2>/dev/null
+
+cat >&2 <
Date: Sun, 25 Aug 2024 09:12:12 -0400
Subject: [PATCH 34/38] ilot/forgejo-aneksajo: new aport

---
 ilot/forgejo-aneksajo/APKBUILD               | 112 ++++++++++++++
 ilot/forgejo-aneksajo/forgejo-aneksajo.ini   |  26 ++++
 ilot/forgejo-aneksajo/forgejo-aneksajo.initd |  15 +++
 .../forgejo-aneksajo.pre-install             |   7 ++
 4 files changed, 160 insertions(+)
 create mode 100644 ilot/forgejo-aneksajo/APKBUILD
 create mode 100644 ilot/forgejo-aneksajo/forgejo-aneksajo.ini
 create mode 100644 ilot/forgejo-aneksajo/forgejo-aneksajo.initd
 create mode 100644 ilot/forgejo-aneksajo/forgejo-aneksajo.pre-install
diff --git a/ilot/forgejo-aneksajo/APKBUILD b/ilot/forgejo-aneksajo/APKBUILD
new file mode 100644
index 0000000..ca50a59
--- /dev/null
+++ b/ilot/forgejo-aneksajo/APKBUILD
@@ -0,0 +1,112 @@
+# Contributor: Carlo Landmeter
+# Contributor: 6543 <6543@obermui.de>
+# Contributor: techknowlogick
+# Contributor: Patrycja Rosa
+# Maintainer: Antoine Martin (ayakael)
+pkgname=forgejo-aneksajo
+pkgver=8.0.1
+_gittag=v$pkgver-git-annex0
+pkgrel=0
+pkgdesc="Self-hosted Git service written in Go with git-annex support"
+url="https://forgejo.org"
+# riscv64: builds fail https://codeberg.org/forgejo/forgejo/issues/3025
+arch="all !riscv64"
+license="MIT"
+depends="git git-lfs gnupg"
+makedepends="go nodejs npm"
+checkdepends="bash openssh openssh-keygen sqlite tzdata"
+install="$pkgname.pre-install"
+pkgusers="forgejo"
+pkggroups="www-data"
+subpackages="$pkgname-openrc"
+source="$pkgname-$_gittag.tar.gz::https://codeberg.org/matrss/forgejo-aneksajo/archive/$_gittag.tar.gz
+	$pkgname.initd
+	$pkgname.ini
+	"
+builddir="$srcdir/forgejo-aneksajo"
+options="!check net chmod-clean" # broken with GIT_CEILING
+
+# secfixes:
+#   7.0.4-r0:
+#     - CVE-2024-24789
+#   7.0.3-r0:
+#     - CVE-2024-24788
+#   1.21.10.0-r0:
+#     - CVE-2023-45288
+#   1.21.3.0-r0:
+#     - CVE-2023-48795
+
+export GOCACHE="${GOCACHE:-"$srcdir/go-cache"}"
+export GOTMPDIR="${GOTMPDIR:-"$srcdir"}"
+export GOMODCACHE="${GOMODCACHE:-"$srcdir/go"}"
+
+# Skip tests for arches that fail for unrelated reasons in CI
+case "$CARCH" in
+s390x|x86|armhf|armv7) options="$options !check" ;;
+esac
+
+prepare() {
+	default_prepare
+
+	npm ci
+}
+
+build() {
+	# XXX: LARGEFILE64
+	export CGO_CFLAGS="$CFLAGS -O2 -D_LARGEFILE64_SOURCE"
+	export TAGS="bindata sqlite sqlite_unlock_notify"
+	export GITEA_VERSION="$pkgver"
+	export EXTRA_GOFLAGS="$GOFLAGS"
+	export CGO_LDFLAGS="$LDFLAGS"
+	unset LDFLAGS
+	## make FHS compliant
+	local setting="code.gitea.io/gitea/modules/setting"
+	export LDFLAGS="$LDFLAGS -X $setting.CustomConf=/etc/forgejo/app.ini"
+	export LDFLAGS="$LDFLAGS -X $setting.AppWorkPath=/var/lib/forgejo/"
+
+	make -j1 build
+}
+
+check() {
+	local home="$srcdir"/home
+	mkdir -p "$home"
+	install -d -m700 "$home"/.ssh
+	touch "$home"/.gitconfig
+
+	env GITEA_ROOT="$home" HOME="$home" GITEA_WORK_DIR="$(pwd)" timeout -s ABRT 20m make -j1 test-sqlite
+	## "make test" - modified (exclude broken tests)
+	## 'code.gitea.io/gitea/modules/migrations': GitHub has rate limits! 403 API
+	local tests=$(go list ./... | grep -v /vendor/ |
+		grep -v 'code.gitea.io/gitea/modules/migrations' |
+		grep -v 'code.gitea.io/gitea/modules/charset' |
+		grep -v 'code.gitea.io/gitea/models/migrations' |
+		grep -v 'code.gitea.io/gitea/services/migrations' |
+		grep -v 'code.gitea.io/gitea/integrations')
+	env GITEA_CONF="$PWD/tests/sqlite.ini" GITEA_ROOT="$home" HOME="$home" GO111MODULE=on go test -mod=vendor -tags='sqlite sqlite_unlock_notify' $tests
+}
+
+package() {
+	for dir in $pkgname $pkgname/git $pkgname/data $pkgname/db $pkgname/custom; do
+		install -dm750 -o forgejo -g www-data \
+			"$pkgdir"/var/lib/$dir
+	done
+
+	install -dm755 -o forgejo -g www-data "$pkgdir"/var/log/forgejo
+
+	# TODO: rename when upstream does
+	install -Dm755 -g www-data gitea "$pkgdir"/usr/bin/forgejo
+
+	install -Dm644 -o forgejo -g www-data "$srcdir"/forgejo-aneksajo.ini \
+		"$pkgdir"/etc/forgejo/app.ini
+	chown forgejo:www-data "$pkgdir"/etc/forgejo
+
+	install -Dm755 "$srcdir"/forgejo-aneksajo.initd \
+		"$pkgdir"/etc/init.d/forgejo
+}
+
+sha512sums="
+d8e273d369c934eec7ff84795cd0d896cda53bc1a2d17f610dd8476ff92dc50c4a24c4598366ef8aac3be52ddef6630489043183085334376c30bc5d4d5f15c2  forgejo-aneksajo-v8.0.1-git-annex0.tar.gz
+eb93a9f6c8f204de5c813f58727015f53f9feaab546589e016c60743131559f04fc1518f487b6d2a0e7fa8fab6d4a67cd0cd9713a7ccd9dec767a8c1ddebe129  forgejo-aneksajo.initd
+b537b41b6b3a945274a6028800f39787b48c318425a37cf5d40ace0d1b305444fd07f17b4acafcd31a629bedd7d008b0bb3e30f82ffeb3d7e7e947bdbe0ff4f3  forgejo-aneksajo.ini
+"
diff --git a/ilot/forgejo-aneksajo/forgejo-aneksajo.ini b/ilot/forgejo-aneksajo/forgejo-aneksajo.ini
new file mode 100644
index 0000000..3b46259
--- /dev/null
+++ b/ilot/forgejo-aneksajo/forgejo-aneksajo.ini
@@ -0,0 +1,26 @@
+# Configuration cheat sheet: https://forgejo.org/docs/latest/admin/config-cheat-sheet/
+
+RUN_USER = forgejo
+RUN_MODE = prod
+
+[repository]
+ROOT = /var/lib/forgejo/git
+SCRIPT_TYPE = sh
+
+[server]
+STATIC_ROOT_PATH = /usr/share/webapps/forgejo
+APP_DATA_PATH = /var/lib/forgejo/data
+LFS_START_SERVER = true
+
+[database]
+DB_TYPE = sqlite3
+PATH = /var/lib/forgejo/db/forgejo.db
+SSL_MODE = disable
+
+[session]
+PROVIDER = file
+
+[log]
+ROOT_PATH = /var/log/forgejo
+MODE = file
+LEVEL = Info
diff --git a/ilot/forgejo-aneksajo/forgejo-aneksajo.initd b/ilot/forgejo-aneksajo/forgejo-aneksajo.initd
new file mode 100644
index 0000000..24dd085
--- /dev/null
+++ b/ilot/forgejo-aneksajo/forgejo-aneksajo.initd
@@ -0,0 +1,15 @@
+#!/sbin/openrc-run
+
+supervisor=supervise-daemon
+name=forgejo
+command="/usr/bin/forgejo"
+command_user="${FORGEJO_USER:-forgejo}:www-data"
+command_args="web --config '${FORGEJO_CONF:-/etc/forgejo/app.ini}'"
+supervise_daemon_args="--env FORGEJO_WORK_DIR='${FORGEJO_WORK_DIR:-/var/lib/forgejo}' --chdir '${FORGEJO_WORK_DIR:-/var/lib/forgejo}' --stdout '${FORGEJO_LOG_FILE:-/var/log/forgejo/http.log}' --stderr '${FORGEJO_LOG_FILE:-/var/log/forgejo/http.log}'"
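+# The ${FORGEJO_*:-...} expansions above are only defaults: with standard
+# OpenRC conf.d handling they can be overridden from an optional
+# /etc/conf.d/forgejo, e.g. (values are illustrative):
+#   FORGEJO_WORK_DIR=/srv/forgejo
+#   FORGEJO_LOG_FILE=/var/log/forgejo/server.log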
+pidfile="/run/forgejo.pid"
+
+depend() {
+	use logger dns
+	need net
+	after firewall mysql postgresql
+}
diff --git a/ilot/forgejo-aneksajo/forgejo-aneksajo.pre-install b/ilot/forgejo-aneksajo/forgejo-aneksajo.pre-install
new file mode 100644
index 0000000..c7e8b7b
--- /dev/null
+++ b/ilot/forgejo-aneksajo/forgejo-aneksajo.pre-install
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+addgroup -S -g 82 www-data 2>/dev/null
+adduser -S -D -h /var/lib/forgejo -s /bin/sh -G www-data -g forgejo forgejo 2>/dev/null \
+	&& passwd -u forgejo 2>/dev/null
+
+exit 0
From a882011e973e7783064c049d5dc0ffaa4b2294cf Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Sun, 25 Aug 2024 09:15:05 -0400
Subject: [PATCH 35/38] ilot/codeberg-pages-server: new aport

---
 ilot/codeberg-pages-server/APKBUILD           | 45 +++++++++++++++++++
 .../codeberg-pages-server.openrc              | 30 +++++++++++++
 .../codeberg-pages-server.post-install        | 39 ++++++++++++++++
 .../codeberg-pages-server.post-upgrade        |  1 +
 .../codeberg-pages-server.pre-install         | 26 +++++++++++
 .../upgrade-go-sqlite3-to-1.14.19.patch       | 26 +++++++++++
 6 files changed, 167 insertions(+)
 create mode 100644 ilot/codeberg-pages-server/APKBUILD
 create mode 100644 ilot/codeberg-pages-server/codeberg-pages-server.openrc
 create mode 100755 ilot/codeberg-pages-server/codeberg-pages-server.post-install
 create mode 120000 ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade
 create mode 100644 ilot/codeberg-pages-server/codeberg-pages-server.pre-install
 create mode 100644 ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch
diff --git a/ilot/codeberg-pages-server/APKBUILD b/ilot/codeberg-pages-server/APKBUILD
new file mode 100644
index 0000000..5eab680
--- /dev/null
+++ b/ilot/codeberg-pages-server/APKBUILD
@@ -0,0 +1,45 @@
+# Contributor: Antoine Martin (ayakael)
+# Maintainer: Antoine Martin (ayakael)
+pkgname=codeberg-pages-server
+pkgver=5.1
+pkgrel=0
+pkgdesc="The Codeberg Pages Server – with custom domain support, per-repo pages using the \"pages\" branch, caching and more."
+url="https://codeberg.org/Codeberg/pages-server" +arch="all" +license="EUPL-1.2" +depends="nginx" +makedepends="go just" +# tests disabled for now +options="!check" +install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install" +source=" + $pkgname-$pkgver.tar.gz::https://codeberg.org/Codeberg/pages-server/archive/v$pkgver.tar.gz + codeberg-pages-server.openrc + upgrade-go-sqlite3-to-1.14.19.patch + " +builddir="$srcdir/"pages-server +subpackages="$pkgname-openrc" +pkgusers="git" +pkggroups="www-data" + +export GOPATH=$srcdir/go +export GOCACHE=$srcdir/go-build +export GOTMPDIR=$srcdir + +build() { + just build +} + +package() { + msg "Packaging $pkgname" + install -Dm755 "$builddir"/build/codeberg-pages-server "$pkgdir"/usr/bin/codeberg-pages-server + + install -Dm755 "$srcdir"/$pkgname.openrc \ + "$pkgdir"/etc/init.d/$pkgname +} + +sha512sums=" +55a1dd5ed0f1cb2aaad1066eca8bfbd1d537169ed3712c748163ebff64edc45d05ac1f6f062433e232e2638a790232438282f96dd7410eb4cbaff7208f5f2427 codeberg-pages-server-5.1.tar.gz +4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1 codeberg-pages-server.openrc +895f1c8d22fcf1d5491a6fe0ce5d93201f83b6dd5fc81b24016b609988fb6c66fdde75bb3830f385a5c83d96366ca3a5f4f9524f52058b6c5dfd8b80d14bac5b upgrade-go-sqlite3-to-1.14.19.patch +" diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.openrc b/ilot/codeberg-pages-server/codeberg-pages-server.openrc new file mode 100644 index 0000000..a036393 --- /dev/null +++ b/ilot/codeberg-pages-server/codeberg-pages-server.openrc @@ -0,0 +1,30 @@ +#!/sbin/openrc-run + +name="$RC_SVCNAME" +cfgfile="/etc/conf.d/$RC_SVCNAME.conf" +pidfile="/run/$RC_SVCNAME.pid" +working_directory="/usr/share/webapps/authentik" +command="/usr/share/webapps/authentik/server" +command_user="authentik" +command_group="authentik" +start_stop_daemon_args="" +command_background="yes" +output_log="/var/log/authentik/$RC_SVCNAME.log" +error_log="/var/log/authentik/$RC_SVCNAME.err" + +depend() { + need redis + need postgresql +} + +start_pre() { + cd "$working_directory" + checkpath --directory --owner $command_user:$command_group --mode 0775 \ + /var/log/authentik \ + /var/lib/authentik/certs +} + +stop_pre() { + ebegin "Killing child processes" + kill $(ps -o pid= --ppid $(cat $pidfile)) || true +} diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.post-install b/ilot/codeberg-pages-server/codeberg-pages-server.post-install new file mode 100755 index 0000000..a715d20 --- /dev/null +++ b/ilot/codeberg-pages-server/codeberg-pages-server.post-install @@ -0,0 +1,39 @@ +#!/bin/sh +set -eu + +group=authentik +config_file='/etc/authentik/config.yml' + +setcap 'cap_net_bind_service=+ep' /usr/share/webapps/authentik/server + +if [ $(grep '@@SECRET_KEY@@' "$config_file") ]; then + echo "* Generating random secret in $config_file" >&2 + + secret_key="$(pwgen -s 50 1)" + sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file" + chown root:$group "$config_file" +fi + +if [ "${0##*.}" = 'post-upgrade' ]; then + cat >&2 <<-EOF + * + * To finish Authentik upgrade run: + * + * authentik-manage migrate + * + EOF +else + cat >&2 <<-EOF + * + * 1. Adjust settings in /etc/authentik/config.yml. + * + * 2. Create database for Authentik: + * + * psql -c "CREATE ROLE authentik PASSWORD 'top-secret' INHERIT LOGIN;" + * psql -c "CREATE DATABASE authentik OWNER authentik ENCODING 'UTF-8';" + * + * 3. Run "authentik-manage migrate" + * 4. 
Setup admin user at https:///if/flow/initial-setup/ + * + EOF +fi diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade b/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade new file mode 120000 index 0000000..d7ffea2 --- /dev/null +++ b/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade @@ -0,0 +1 @@ +codeberg-pages-server.post-install \ No newline at end of file diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.pre-install b/ilot/codeberg-pages-server/codeberg-pages-server.pre-install new file mode 100644 index 0000000..792f304 --- /dev/null +++ b/ilot/codeberg-pages-server/codeberg-pages-server.pre-install @@ -0,0 +1,26 @@ +#!/bin/sh +# It's very important to set user/group correctly. + +authentik_dir='/var/lib/authentik' + +if ! getent group authentik 1>/dev/null; then + echo '* Creating group authentik' 1>&2 + + addgroup -S authentik +fi + +if ! id authentik 2>/dev/null 1>&2; then + echo '* Creating user authentik' 1>&2 + + adduser -DHS -G authentik -h "$authentik_dir" -s /bin/sh \ + -g "added by apk for authentik" authentik + passwd -u authentik 1>/dev/null # unlock +fi + +if ! id -Gn authentik | grep -Fq redis; then + echo '* Adding user authentik to group redis' 1>&2 + + addgroup authentik redis +fi + +exit 0 diff --git a/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch b/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch new file mode 100644 index 0000000..fabb214 --- /dev/null +++ b/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch @@ -0,0 +1,26 @@ +diff --git a/go.mod.orig b/go.mod +index eba292e..00310e5 100644 +--- a/go.mod.orig ++++ b/go.mod +@@ -11,7 +11,7 @@ require ( + github.com/go-sql-driver/mysql v1.6.0 + github.com/joho/godotenv v1.4.0 + github.com/lib/pq v1.10.7 +- github.com/mattn/go-sqlite3 v1.14.16 ++ github.com/mattn/go-sqlite3 v1.14.19 + github.com/microcosm-cc/bluemonday v1.0.26 + github.com/reugn/equalizer v0.0.0-20210216135016-a959c509d7ad + github.com/rs/zerolog v1.27.0 +diff --git a/go.sum.orig b/go.sum +index 7ea8b78..19145ea 100644 +--- a/go.sum.orig ++++ b/go.sum +@@ -479,6 +479,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m + github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= + github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= + github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= ++github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= ++github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= + github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= + github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= + github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= From 655cadf71de1a380f075a65a78aa402837ed2e44 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 12:55:18 -0400 Subject: [PATCH 36/38] forgejo-ci: remove packages from all arches when deploying --- .forgejo/bin/deploy.sh | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/.forgejo/bin/deploy.sh b/.forgejo/bin/deploy.sh index e8e811c..4d02695 100755 --- a/.forgejo/bin/deploy.sh +++ b/.forgejo/bin/deploy.sh @@ -14,21 +14,18 @@ for apk in $apkgs; do arch=$(echo $apk | awk -F '/' '{print $3}') 
name=$(echo $apk | awk -F '/' '{print $4}') + # always clear out package before deploying + for delarch in x86_64 aarch64 armv7 armhf s390x ppc64le riscv64 loongarch64 x86; do + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$delarch/$name 2>&1 > /dev/null + done + if [ "$(curl -s $GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/pulls/$GITHUB_EVENT_NUMBER | jq .draft)" == "true" ]; then # if draft, send to -testing branch branch="$branch-testing" - else - # if not draft, assume that this was sent to $branch-testing and nuke it - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch-testing/$arch/$name fi echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch" - return=$(curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch 2>&1) - echo $return - if [ "$return" == "package file already exists" ]; then - echo "Package already exists, refreshing..." - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$arch/$name - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch - fi + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch + done From f6c84e562f0fb05132ca9d3af67f4d1f471e2fb1 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 15:09:21 -0400 Subject: [PATCH 37/38] forge-ci: remove packages after WIP check --- .forgejo/bin/deploy.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.forgejo/bin/deploy.sh b/.forgejo/bin/deploy.sh index 4d02695..a2fd7d4 100755 --- a/.forgejo/bin/deploy.sh +++ b/.forgejo/bin/deploy.sh @@ -14,16 +14,16 @@ for apk in $apkgs; do arch=$(echo $apk | awk -F '/' '{print $3}') name=$(echo $apk | awk -F '/' '{print $4}') - # always clear out package before deploying - for delarch in x86_64 aarch64 armv7 armhf s390x ppc64le riscv64 loongarch64 x86; do - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$delarch/$name 2>&1 > /dev/null - done - if [ "$(curl -s $GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/pulls/$GITHUB_EVENT_NUMBER | jq .draft)" == "true" ]; then # if draft, send to -testing branch branch="$branch-testing" fi + # always clear out package before deploying + for delarch in x86_64 aarch64 armv7 armhf s390x ppc64le riscv64 loongarch64 x86; do + curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$delarch/$name 2>&1 > /dev/null + done + echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch" curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch From fe5359e933f4079d5272814b23df69e0429fa18e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Sun, 25 Aug 2024 15:16:10 -0400 Subject: [PATCH 38/38] remove install files --- ilot/codeberg-pages-server/APKBUILD | 1 - .../codeberg-pages-server.post-install | 39 ------------------- .../codeberg-pages-server.post-upgrade | 1 - .../codeberg-pages-server.pre-install | 26 ------------- 4 files changed, 67 deletions(-) delete mode 100755 ilot/codeberg-pages-server/codeberg-pages-server.post-install delete mode 120000 ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade delete mode 100644 ilot/codeberg-pages-server/codeberg-pages-server.pre-install diff --git a/ilot/codeberg-pages-server/APKBUILD b/ilot/codeberg-pages-server/APKBUILD index 
5eab680..4478fdd 100644 --- a/ilot/codeberg-pages-server/APKBUILD +++ b/ilot/codeberg-pages-server/APKBUILD @@ -11,7 +11,6 @@ depends="nginx" makedepends="go just" # tests disabled for now options="!check" -install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install" source=" $pkgname-$pkgver.tar.gz::https://codeberg.org/Codeberg/pages-server/archive/v$pkgver.tar.gz codeberg-pages-server.openrc diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.post-install b/ilot/codeberg-pages-server/codeberg-pages-server.post-install deleted file mode 100755 index a715d20..0000000 --- a/ilot/codeberg-pages-server/codeberg-pages-server.post-install +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -set -eu - -group=authentik -config_file='/etc/authentik/config.yml' - -setcap 'cap_net_bind_service=+ep' /usr/share/webapps/authentik/server - -if [ $(grep '@@SECRET_KEY@@' "$config_file") ]; then - echo "* Generating random secret in $config_file" >&2 - - secret_key="$(pwgen -s 50 1)" - sed -i "s|@@SECRET_KEY@@|$secret_key|" "$config_file" - chown root:$group "$config_file" -fi - -if [ "${0##*.}" = 'post-upgrade' ]; then - cat >&2 <<-EOF - * - * To finish Authentik upgrade run: - * - * authentik-manage migrate - * - EOF -else - cat >&2 <<-EOF - * - * 1. Adjust settings in /etc/authentik/config.yml. - * - * 2. Create database for Authentik: - * - * psql -c "CREATE ROLE authentik PASSWORD 'top-secret' INHERIT LOGIN;" - * psql -c "CREATE DATABASE authentik OWNER authentik ENCODING 'UTF-8';" - * - * 3. Run "authentik-manage migrate" - * 4. Setup admin user at https:///if/flow/initial-setup/ - * - EOF -fi diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade b/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade deleted file mode 120000 index d7ffea2..0000000 --- a/ilot/codeberg-pages-server/codeberg-pages-server.post-upgrade +++ /dev/null @@ -1 +0,0 @@ -codeberg-pages-server.post-install \ No newline at end of file diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.pre-install b/ilot/codeberg-pages-server/codeberg-pages-server.pre-install deleted file mode 100644 index 792f304..0000000 --- a/ilot/codeberg-pages-server/codeberg-pages-server.pre-install +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -# It's very important to set user/group correctly. - -authentik_dir='/var/lib/authentik' - -if ! getent group authentik 1>/dev/null; then - echo '* Creating group authentik' 1>&2 - - addgroup -S authentik -fi - -if ! id authentik 2>/dev/null 1>&2; then - echo '* Creating user authentik' 1>&2 - - adduser -DHS -G authentik -h "$authentik_dir" -s /bin/sh \ - -g "added by apk for authentik" authentik - passwd -u authentik 1>/dev/null # unlock -fi - -if ! id -Gn authentik | grep -Fq redis; then - echo '* Adding user authentik to group redis' 1>&2 - - addgroup authentik redis -fi - -exit 0
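
For reference, a minimal consumer-side sketch of the repository layout these
workflows publish. Assumptions: an Alpine host, a published v3.20 base branch,
and the "ilot" repo; the key URL is the same one the build workflows fetch.

	# Trust the signing key exposed by the Forgejo package registry.
	cd /etc/apk/keys && curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key
	# Repositories are laid out as $CI_ALPINE_REPO/<basebranch>/<repo>.
	echo "https://forge.ilot.io/api/packages/ilot/alpine/v3.20/ilot" >> /etc/apk/repositories
	apk update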