diff --git a/.forgejo/bin/check_ver.sh b/.forgejo/bin/check_ver.sh
index 05c2b54..66c7fd0 100755
--- a/.forgejo/bin/check_ver.sh
+++ b/.forgejo/bin/check_ver.sh
@@ -18,30 +18,8 @@ for pkg in $owned_by_you; do
 	downstream_version=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="V"){print $2}}' | sort -V | tail -n 1)
 	downstream_version=${downstream_version/-*}
 
-	# special cases
-	case $pkg in
-		forgejo-aneksajo)upstream_version=${upstream_version/-git-annex/_git};;
-		authentik)
-			upstream_version=$(curl --fail -X GET -sS -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/projects/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_versions' | jq -r ".[] | match(\"${downstream_version%.*}.*\").string" | head -n 1)
-			latest_version=$(curl --fail -X GET -sS -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/packages/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_version' )
-			# append version number to signal that this is not latest major version
-			if [ "${upstream_version%.*}" != "${latest_version%.*}" ]; then
-				echo "$pkg${latest_version%.*} major version available"
-				echo "$pkg${latest_version%.*} $downstream_version $latest_version $repo" >> out_of_date
-				pkg=$pkg${upstream_version%.*}
-			fi
-			;;
-		mastodon)
-			upstream_version=$(curl --fail -X GET -sS -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/projects/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_versions' | jq -r ".[] | match(\"${downstream_version%.*}.*\").string" | head -n 1)
-			latest_version=$(curl --fail -X GET -sS -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/packages/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_version' )
-			# append version number to signal that this is not latest major version
-			if [ "${upstream_version%.*}" != "${latest_version%.*}" ]; then
-				echo "$pkg${latest_version%.*} major version available"
-				echo "$pkg${latest_version%.*} $downstream_version $latest_version $repo" >> out_of_date
-				pkg=$pkg${upstream_version%.*}
-			fi
-			;;
-	esac
+	# special case for forgejo-aneksajo:
+	upstream_version=${upstream_version/-git-annex/_git}
 
 	if [ -z "$upstream_version" ]; then
 		echo "$pkg not in anitya"
diff --git a/.forgejo/bin/create_issue.sh b/.forgejo/bin/create_issue.sh
index 995e519..d162758 100755
--- a/.forgejo/bin/create_issue.sh
+++ b/.forgejo/bin/create_issue.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # expects:
-# env variable ISSUE_TOKEN
+# env variable FORGEJO_TOKEN
 # file out_of_date
 
 IFS='
@@ -15,12 +15,12 @@ does_it_exist() {
 	repo=$4
 
 	query="$repo/$name: upgrade to $upstream_version"
-	query="%22$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )%22"
+	query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )"
 
 	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
+		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
 		-H 'accept: application/json' \
-		-H "Authorization: token $ISSUE_TOKEN"
+		-H "authorization: Basic $FORGEJO_TOKEN"
 	)"
 
 	if [ "$result" == "[]" ]; then
@@ -35,12 +35,12 @@ is_it_old() {
 	repo=$4
 
 	query="$repo/$name: upgrade to"
-	query="%22$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )%22"
+	query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )"
 
 	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
+		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
 		-H 'accept: application/json' \
-		-H "authorization: token $ISSUE_TOKEN"
+		-H "authorization: Basic $FORGEJO_TOKEN"
 	)"
 
 	result_title="$(echo $result | jq -r '.[].title' )"
@@ -64,7 +64,7 @@ update_title() {
 	result=$(curl --silent -X 'PATCH' \
 		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$id" \
 		-H 'accept: application/json' \
-		-H "authorization: token $ISSUE_TOKEN" \
+		-H "authorization: Basic $FORGEJO_TOKEN" \
 		-H 'Content-Type: application/json' \
 		-d "{
 		  \"title\": \"$repo/$name: upgrade to $upstream_version\"
@@ -83,7 +83,7 @@ create_issue() {
 	result=$(curl --silent -X 'POST' \
 		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
 		-H 'accept: application/json' \
-		-H "authorization: token $ISSUE_TOKEN" \
+		-H "authorization: Basic $FORGEJO_TOKEN" \
 		-H 'Content-Type: application/json' \
 		-d "{
 		  \"title\": \"$repo/$name: upgrade to $upstream_version\",
@@ -126,12 +126,12 @@ fi
 if [ -f not_in_anitya ]; then
 	query="Add missing $repo packages to anitya"
-	query="%22$(echo $query | sed 's| |%20|g')%22"
+	query="$(echo $query | sed 's| |%20|g')"
 
 	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
+		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
 		-H 'accept: application/json' \
-		-H "authorization: token $ISSUE_TOKEN"
+		-H "authorization: Basic $FORGEJO_TOKEN"
 	)"
 
 	if [ "$result" == "[]" ]; then
@@ -139,7 +139,7 @@ if [ -f not_in_anitya ]; then
 		result=$(curl --silent -X 'POST' \
 			"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
 			-H 'accept: application/json' \
-			-H "authorization: token $ISSUE_TOKEN" \
+			-H "authorization: Basic $FORGEJO_TOKEN" \
 			-H 'Content-Type: application/json' \
 			-d "{
 			  \"title\": \"Add missing $repo packages to anitya\",
@@ -155,7 +155,7 @@ if [ -f not_in_anitya ]; then
 		result=$(curl --silent -X 'PATCH' \
 			"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$result_id" \
 			-H 'accept: application/json' \
-			-H "authorization: token $ISSUE_TOKEN" \
+			-H "authorization: Basic $FORGEJO_TOKEN" \
 			-H 'Content-Type: application/json' \
 			-d "{
 			  \"body\": \"- [ ] $(sed '{:q;N;s/\n/\\n- [ ] /g;t q}' not_in_anitya)\"
diff --git a/.forgejo/workflows/build-aarch64.yaml b/.forgejo/workflows/build-aarch64.yaml
index 2a4dfc0..0364014 100644
--- a/.forgejo/workflows/build-aarch64.yaml
+++ b/.forgejo/workflows/build-aarch64.yaml
@@ -19,7 +19,6 @@ jobs:
     steps:
       - name: Environment setup
        run: |
-          doas apk upgrade -a
           doas apk add nodejs git patch curl net-tools
           doas hostname host.docker.internal
           cd /etc/apk/keys
@@ -48,7 +47,7 @@ jobs:
       GITHUB_EVENT_NUMBER: ${{ github.event.number }}
     steps:
       - name: Setting up environment
-        run: apk add nodejs-current curl findutils git gawk jq
+        run: apk add nodejs curl findutils git gawk jq
       - name: Repo pull
         uses: actions/checkout@v4
       - name: Package download
diff --git a/.forgejo/workflows/build-x86_64.yaml b/.forgejo/workflows/build-x86_64.yaml
index aaffb72..c805199 100644
--- a/.forgejo/workflows/build-x86_64.yaml
+++ b/.forgejo/workflows/build-x86_64.yaml
@@ -19,7 +19,6 @@ jobs:
     steps:
      - name: Environment setup
        run: |
-          doas apk upgrade -a
           doas apk add nodejs git patch curl net-tools
           doas hostname host.docker.internal
           cd /etc/apk/keys
@@ -48,7 +47,7 @@ jobs:
       GITHUB_EVENT_NUMBER: ${{ github.event.number }}
     steps:
       - name: Setting up environment
-        run: apk add nodejs-current curl findutils git gawk jq
+        run: apk add nodejs curl findutils git gawk jq
       - name: Repo pull
         uses: actions/checkout@v4
       - name: Package download
diff --git a/.forgejo/workflows/check-ilot.yml b/.forgejo/workflows/check-ilot.yml
index b57e80d..652930d 100644
--- a/.forgejo/workflows/check-ilot.yml
+++ b/.forgejo/workflows/check-ilot.yml
@@ -11,12 +11,12 @@ jobs:
     container:
       image: alpine:latest
     env:
-      downstream: https://forge.ilot.io/api/packages/ilot/alpine/v3.21/ilot
-      ISSUE_TOKEN: ${{ secrets.issue_token }}
+      downstream: https://forge.ilot.io/api/packages/ilot/alpine/v3.20/ilot
+      FORGEJO_TOKEN: ${{ secrets.forgejo_token }}
       LABEL_NUMBER: 8
     steps:
       - name: Environment setup
-        run: apk add grep coreutils gawk curl wget bash nodejs-current git jq sed
+        run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
       - name: Get scripts
         uses: actions/checkout@v4
         with:
diff --git a/.forgejo/workflows/lint.yaml b/.forgejo/workflows/lint.yaml
index 743cefc..3614deb 100644
--- a/.forgejo/workflows/lint.yaml
+++ b/.forgejo/workflows/lint.yaml
@@ -14,9 +14,7 @@ jobs:
       CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }}
       CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
     steps:
-      - run: |
-          doas apk upgrade -a
-          doas apk add nodejs git
+      - run: doas apk add nodejs git
      - uses: actions/checkout@v4
        with:
          fetch-depth: 500
diff --git a/ilot/mastodon/APKBUILD b/archives/mastodon/APKBUILD
similarity index 96%
rename from ilot/mastodon/APKBUILD
rename to archives/mastodon/APKBUILD
index dcf1bd5..954ff0b 100644
--- a/ilot/mastodon/APKBUILD
+++ b/archives/mastodon/APKBUILD
@@ -2,9 +2,9 @@
 # Maintainer: Antoine Martin (ayakael)
 pkgname=mastodon
 _pkgname=$pkgname
-pkgver=4.2.20
+pkgver=4.2.10
 _gittag=v$pkgver
-pkgrel=0
+pkgrel=1
 pkgdesc="Self-hosted social media and network server based on ActivityPub and OStatus"
 arch="x86_64"
 url="https://github.com/mastodon/mastodon"
@@ -192,7 +192,7 @@ assets() {
 }
 
 sha512sums="
-132df11b54bf0f900e2ee6e149ddb730706a67fc6130ead63b327028fa590944f21a19bcba07d859885717208b6abc005d0aee7675fd8e0fb09ad8d6f8f631b7  mastodon-v4.2.20.tar.gz
+1fe5417136bc020a83b83eaccef7f1f46c13fc8318681f12ba556b1b6b03e25ef7b6335c28f4e6722101e97b63020cbd0d3fbacdaf9b3b5a4b73c3cf3e230813  mastodon-v4.2.10.tar.gz
 d49fea9451c97ccefe5e35b68e4274aeb427f9d1e910b89c1f6c810489c3bec1ccff72952fdaef95abf944b8aff0da84a52347540d36ff1fba5ccc19e1d935c6  mastodon.initd
 eefe12a31268245f802222c0001dac884e03adb0d301e53a1512a3cd204836ca03ad083908cd14d146cf0dce99e3a4366570efd0e40a9a490ccd381d4c63c32f  mastodon.web.initd
 8fc9249c01693bb02b8d1a6177288d5d3549addde8c03eb35cc7a32dde669171872ebc2b5deb8019dc7a12970098f1af707171fa41129be31b04e1dc1651a777  mastodon.sidekiq.initd
diff --git a/ilot/mastodon/bin-wrapper.in b/archives/mastodon/bin-wrapper.in
similarity index 100%
rename from ilot/mastodon/bin-wrapper.in
rename to archives/mastodon/bin-wrapper.in
diff --git a/ilot/mastodon/mastodon.initd b/archives/mastodon/mastodon.initd
similarity index 100%
rename from ilot/mastodon/mastodon.initd
rename to archives/mastodon/mastodon.initd
diff --git a/ilot/mastodon/mastodon.logrotate b/archives/mastodon/mastodon.logrotate
similarity index 100%
rename from ilot/mastodon/mastodon.logrotate
rename to archives/mastodon/mastodon.logrotate
diff --git a/ilot/mastodon/mastodon.post-install b/archives/mastodon/mastodon.post-install
similarity index 100%
rename from ilot/mastodon/mastodon.post-install
rename to archives/mastodon/mastodon.post-install
diff --git a/ilot/mastodon/mastodon.post-upgrade b/archives/mastodon/mastodon.post-upgrade
similarity index 100%
rename from ilot/mastodon/mastodon.post-upgrade
rename to archives/mastodon/mastodon.post-upgrade
diff --git a/ilot/mastodon/mastodon.pre-install b/archives/mastodon/mastodon.pre-install
similarity index 100%
rename from ilot/mastodon/mastodon.pre-install
rename to archives/mastodon/mastodon.pre-install
diff --git a/ilot/mastodon/mastodon.sidekiq.initd b/archives/mastodon/mastodon.sidekiq.initd
similarity index 100%
rename from ilot/mastodon/mastodon.sidekiq.initd
rename to archives/mastodon/mastodon.sidekiq.initd
diff --git a/ilot/mastodon/mastodon.streaming.initd b/archives/mastodon/mastodon.streaming.initd
similarity index 100%
rename from ilot/mastodon/mastodon.streaming.initd
rename to archives/mastodon/mastodon.streaming.initd
diff --git a/ilot/mastodon/mastodon.web.initd b/archives/mastodon/mastodon.web.initd
similarity index 100%
rename from ilot/mastodon/mastodon.web.initd
rename to archives/mastodon/mastodon.web.initd
diff --git a/ilot/ruby3.2-bundler/APKBUILD b/archives/ruby3.2-bundler/APKBUILD
similarity index 100%
rename from ilot/ruby3.2-bundler/APKBUILD
rename to archives/ruby3.2-bundler/APKBUILD
diff --git a/ilot/ruby3.2-bundler/manpages.patch b/archives/ruby3.2-bundler/manpages.patch
similarity index 100%
rename from ilot/ruby3.2-bundler/manpages.patch
rename to archives/ruby3.2-bundler/manpages.patch
diff --git a/ilot/ruby3.2-minitest/APKBUILD b/archives/ruby3.2-minitest/APKBUILD
similarity index 100%
rename from ilot/ruby3.2-minitest/APKBUILD
rename to archives/ruby3.2-minitest/APKBUILD
diff --git a/ilot/ruby3.2-minitest/gemspec.patch b/archives/ruby3.2-minitest/gemspec.patch
similarity index 100%
rename from ilot/ruby3.2-minitest/gemspec.patch
rename to archives/ruby3.2-minitest/gemspec.patch
diff --git a/ilot/ruby3.2-rake/APKBUILD b/archives/ruby3.2-rake/APKBUILD
similarity index 100%
rename from ilot/ruby3.2-rake/APKBUILD
rename to archives/ruby3.2-rake/APKBUILD
diff --git a/ilot/ruby3.2/APKBUILD b/archives/ruby3.2/APKBUILD
similarity index 96%
rename from ilot/ruby3.2/APKBUILD
rename to archives/ruby3.2/APKBUILD
index f3e0f8a..59e7332 100644
--- a/ilot/ruby3.2/APKBUILD
+++ b/archives/ruby3.2/APKBUILD
@@ -3,10 +3,6 @@
 # Maintainer: Jakub Jirutka
 #
 # secfixes:
-#   3.2.4-r0:
-#     - CVE-2024-27282
-#     - CVE-2024-27281
-#     - CVE-2024-27280
 #   3.1.4-r0:
 #     - CVE-2023-28755
 #     - CVE-2023-28756
@@ -62,7 +58,7 @@ pkgname=ruby3.2
 # When upgrading, upgrade also each ruby- aport listed in file
 # gems/bundled_gems. If some aport is missing or not in the main repo,
 # create/move it.
-pkgver=3.2.6
+pkgver=3.2.2
 _abiver="${pkgver%.*}.0"
 pkgrel=0
 pkgdesc="An object-oriented language for quick and easy programming"
@@ -77,7 +73,6 @@ depends_dev="
 	libucontext-dev
 	"
 makedepends="$depends_dev
-	cargo
 	autoconf
 	gdbm-dev
 	libffi-dev
@@ -250,7 +245,7 @@ full() {
 }
 
 sha512sums="
-26ae9439043cf40e5eddde6b92ae51c9e1fa4e89c8ec6da36732c59c14873b022c683fb3007950d372f35de9b62a4fabbbc3ef1f4ef58cd53058bd56e1552cbe  ruby-3.2.6.tar.gz
+bcc68f3f24c1c8987d9c80b57332e5791f25b935ba38daf5addf60dbfe3a05f9dcaf21909681b88e862c67c6ed103150f73259c6e35c564f13a00f432e3c1e46  ruby-3.2.2.tar.gz
 16fc1f35aee327d1ecac420b091beaa53c675e0504d5a6932004f17ca68a2c38f57b053b0a3903696f2232c5add160d363e3972a962f7f7bcb52e4e998c7315d  test_insns-lower-recursion-depth.patch
 42cd45c1db089a1ae57834684479a502e357ddba82ead5fa34e64c13971e7ab7ad2919ddd60a104a817864dd3e2e35bdbedb679210eb41d82cab36a0687e43d4  fix-get_main_stack.patch
 a77da5e5eb7d60caf3f1cabb81e09b88dc505ddd746e34efd1908c0096621156d81cc65095b846ba9bdb66028891aefce883a43ddec6b56b5beb4aac5e4ee33f  dont-install-bundled-gems.patch
diff --git a/ilot/ruby3.2/dont-install-bundled-gems.patch b/archives/ruby3.2/dont-install-bundled-gems.patch
similarity index 100%
rename from ilot/ruby3.2/dont-install-bundled-gems.patch
rename to archives/ruby3.2/dont-install-bundled-gems.patch
diff --git a/ilot/ruby3.2/fix-get_main_stack.patch b/archives/ruby3.2/fix-get_main_stack.patch
similarity index 100%
rename from ilot/ruby3.2/fix-get_main_stack.patch
rename to archives/ruby3.2/fix-get_main_stack.patch
diff --git a/ilot/ruby3.2/fix-riscv64-build.patch b/archives/ruby3.2/fix-riscv64-build.patch
similarity index 100%
rename from ilot/ruby3.2/fix-riscv64-build.patch
rename to archives/ruby3.2/fix-riscv64-build.patch
diff --git a/ilot/ruby3.2/ruby3.2.post-upgrade b/archives/ruby3.2/ruby3.2.post-upgrade
similarity index 100%
rename from ilot/ruby3.2/ruby3.2.post-upgrade
rename to archives/ruby3.2/ruby3.2.post-upgrade
diff --git a/ilot/ruby3.2/test_insns-lower-recursion-depth.patch b/archives/ruby3.2/test_insns-lower-recursion-depth.patch
similarity index 100%
rename from ilot/ruby3.2/test_insns-lower-recursion-depth.patch
rename to archives/ruby3.2/test_insns-lower-recursion-depth.patch
diff --git a/backports/forgejo-runner/APKBUILD b/backports/forgejo-runner/APKBUILD
new file mode 100644
index 0000000..1005964
--- /dev/null
+++ b/backports/forgejo-runner/APKBUILD
@@ -0,0 +1,47 @@
+# Contributor: Patrycja Rosa
+# Maintainer: Patrycja Rosa
+pkgname=forgejo-runner
+pkgver=3.5.0
+pkgrel=2
+pkgdesc="CI/CD job runner for Forgejo"
+url="https://code.forgejo.org/forgejo/runner"
+arch="all"
+license="MIT"
+makedepends="go"
+install="$pkgname.pre-install $pkgname.pre-upgrade"
+subpackages="$pkgname-openrc"
+source="$pkgname-$pkgver.tar.gz::https://code.forgejo.org/forgejo/runner/archive/v$pkgver.tar.gz
+
+	forgejo-runner.logrotate
+	forgejo-runner.initd
+	forgejo-runner.confd
+	"
+builddir="$srcdir/runner"
+options="!check" # tests require running forgejo
+
+build() {
+	go build \
+		-o forgejo-runner \
+		-ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=$pkgver"
+	./forgejo-runner generate-config > config.example.yaml
+}
+
+check() {
+	go test ./...
+}
+
+package() {
+	install -Dm755 forgejo-runner -t "$pkgdir"/usr/bin/
+	install -Dm644 config.example.yaml -t "$pkgdir"/etc/forgejo-runner/
+
+	install -Dm755 "$srcdir"/forgejo-runner.initd "$pkgdir"/etc/init.d/forgejo-runner
+	install -Dm644 "$srcdir"/forgejo-runner.confd "$pkgdir"/etc/conf.d/forgejo-runner
+	install -Dm644 "$srcdir"/forgejo-runner.logrotate "$pkgdir"/etc/logrotate.d/forgejo-runner
+}
+
+sha512sums="
+e78968a5f9b6e797fb759a5c8cbf46a5c2fef2083dabc88599c9017729faface963576c63a948b0add424cb267902e864fb1a1b619202660296976d93e670713  forgejo-runner-3.5.0.tar.gz
+a3c7238b0c63053325d31e09277edd88690ef5260854517f82d9042d6173fb5d24ebfe36e1d7363673dd8801972638a6e69b6af8ad43debb6057515c73655236  forgejo-runner.logrotate
+bb0c6fbe90109c77f9ef9cb0d35d20b8033be0e4b7a60839b596aa5528dfa24309ec894d8c04066bf8fb30143e63a5fd8cc6fc89aac364422b583e0f840e2da6  forgejo-runner.initd
+e11eab27f88f1181112389befa7de3aa0bac7c26841861918707ede53335535425c805e6682e25704e9c8a6aecba3dc13e20900a99df1183762b012b62f26d5f  forgejo-runner.confd
+"
diff --git a/backports/forgejo-runner/forgejo-runner.confd b/backports/forgejo-runner/forgejo-runner.confd
new file mode 100644
index 0000000..874e695
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.confd
@@ -0,0 +1,17 @@
+# Configuration for /etc/init.d/forgejo-runner
+
+# Path to the config file (--config).
+#cfgfile="/etc/forgejo-runner/config.yaml"
+
+# Path to the working directory (--working-directory).
+#datadir="/var/lib/forgejo-runner"
+
+# Path to the log file where stdout/stderr will be redirected.
+# Leave empty/commented out to use syslog instead.
+#output_log="/var/log/forgejo-runner.log"
+
+# You may change this to root, e.g. to run jobs in LXC
+#command_user="forgejo-runner"
+
+# Comment out to run without process supervisor.
+supervisor=supervise-daemon
diff --git a/backports/forgejo-runner/forgejo-runner.initd b/backports/forgejo-runner/forgejo-runner.initd
new file mode 100644
index 0000000..c54acdd
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.initd
@@ -0,0 +1,38 @@
+#!/sbin/openrc-run
+
+description="Forgejo CI Runner"
+name="Forgejo Runner"
+
+: ${cfgfile:="/etc/forgejo-runner/config.yaml"}
+: ${datadir:="/var/lib/forgejo-runner"}
+: ${command_user:="forgejo-runner"}
+
+command="/usr/bin/forgejo-runner"
+command_args="daemon --config $cfgfile"
+command_background="yes"
+directory="$datadir"
+pidfile="/run/$RC_SVCNAME.pid"
+
+depend() {
+	need net
+	use dns logger
+}
+
+start_pre() {
+	checkpath -d -o "$command_user" /etc/forgejo-runner
+	checkpath -d -o "$command_user" "$datadir"
+
+	if ! [ -e "$cfgfile" ]; then
+		eerror "Config file $cfgfile doesn't exist."
+		eerror "You can generate it with: forgejo-runner generate-config,"
+		eerror "or use the auto-generated one in /etc/forgejo-runner/config.example.yaml"
+		return 1
+	fi
+
+	if [ "$error_log" ]; then
+		output_log="$error_log"
+	else
+		output_logger="logger -t '${RC_SVCNAME}' -p daemon.info"
+		error_logger="logger -t '${RC_SVCNAME}' -p daemon.error"
+	fi
+}
diff --git a/backports/forgejo-runner/forgejo-runner.logrotate b/backports/forgejo-runner/forgejo-runner.logrotate
new file mode 100644
index 0000000..1a0539e
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.logrotate
@@ -0,0 +1,5 @@
+/var/log/forgejo-runner.log {
+	copytruncate
+	missingok
+	notifempty
+}
diff --git a/backports/forgejo-runner/forgejo-runner.pre-install b/backports/forgejo-runner/forgejo-runner.pre-install
new file mode 100644
index 0000000..5ce27be
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.pre-install
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+addgroup -S forgejo-runner 2>/dev/null
+adduser -S -D -H -h /var/lib/forgejo-runner -s /sbin/nologin -G forgejo-runner -g forgejo-runner forgejo-runner 2>/dev/null
+
+cat >&2 <
diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD
 # Maintainer: Antoine Martin (ayakael)
 pkgname=authentik
-pkgver=2025.2.4
+pkgver=2024.8.4
 pkgrel=0
 pkgdesc="An open-source Identity Provider focused on flexibility and versatility"
 url="https://github.com/goauthentik/authentik"
@@ -10,9 +10,6 @@ url="https://github.com/goauthentik/authentik"
 # ppc64le: not supported by Rollup build
 arch="aarch64 x86_64"
 license="MIT"
-# following depends aren't direct dependencies, but are needed:
-# py3-asn1crypto, py3-cbor2, py3-email-validator, py3-websockets
-# py3-openssl, py3-uvloop, py3-httptools
 depends="
 	bash
 	libcap-setcap
@@ -20,94 +17,150 @@ depends="
 	postgresql
 	procps
 	pwgen
+	py3-aiohttp
+	py3-aiosignal
+	py3-amqp
+	py3-anyio
+	py3-asgiref
+	py3-asn1
 	py3-asn1crypto
+	py3-async-timeout
+	py3-attrs
+	py3-autobahn
+	py3-automat
+	py3-bcrypt
+	py3-billiard
+	py3-cachetools
 	py3-cbor2
 	py3-celery
+	py3-certifi
 	py3-cffi
 	py3-channels
 	py3-channels_redis
+	py3-charset-normalizer
+	py3-click
+	py3-click-didyoumean
+	py3-click-plugins
+	py3-click-repl
+	py3-codespell
+	py3-colorama
+	py3-constantly
+	py3-cparser
 	py3-cryptography
 	py3-dacite
 	py3-daphne
+	py3-dateutil
 	py3-deepmerge
 	py3-defusedxml
-	py3-docker-py
+	py3-deprecated
+	py3-dnspython
 	py3-django
 	py3-django-countries
 	py3-django-cte
 	py3-django-filter
 	py3-django-guardian
 	py3-django-model-utils
+	py3-django-otp
 	py3-django-prometheus
 	py3-django-pglock
 	py3-django-redis
-	py3-django-rest-framework~3.14.0
+	py3-django-rest-framework~=3.14.0
 	py3-django-rest-framework-guardian
 	py3-django-storages
 	py3-django-tenants
+	py3-django-tenant-schemas
+	py3-docker-py
+	py3-dotenv
 	py3-dumb-init
-	py3-duo-client
+	py3-duo_client
 	py3-drf-orjson-renderer
 	py3-drf-spectacular
 	py3-email-validator
 	py3-fido2
 	py3-flower
+	py3-frozenlist
 	py3-geoip2
-	py3-geopy
+	py3-google-auth
 	py3-google-api-python-client
 	py3-gunicorn
+	py3-h11
 	py3-httptools
+	py3-humanize
+	py3-hyperlink
+	py3-idna
+	py3-incremental
+	py3-inflection
+	py3-jsonschema
 	py3-jsonpatch
 	py3-jwt
-	py3-jwcrypto
-	py3-kadmin-rs
+	py3-kombu
 	py3-kubernetes
 	py3-ldap3
 	py3-lxml
 	py3-maxminddb
 	py3-msgpack
 	py3-msgraph-sdk
+	py3-multidict
+	py3-oauthlib
 	py3-opencontainers
 	py3-openssl
+	py3-packaging
 	py3-paramiko
+	py3-parsing
+	py3-prometheus-client
+	py3-prompt_toolkit
 	py3-psycopg
 	py3-psycopg-c
-	py3-pydantic
 	py3-pydantic-scim
+	py3-pynacl
+	py3-pyrsistent
 	py3-pyrad
-	py3-python-gssapi
+	py3-python-jwt
+	py3-redis
+	py3-requests
 	py3-requests-oauthlib
+	py3-rsa
 	py3-scim2-filter-parser
 	py3-setproctitle
 	py3-sentry-sdk
 	py3-service_identity
+	py3-setuptools
 	py3-six
 	py3-sniffio
 	py3-sqlparse
 	py3-structlog
 	py3-swagger-spec-validator
+	py3-tornado
 	py3-twilio
+	py3-txaio
 	py3-tenant-schemas-celery
+	py3-typing-extensions
+	py3-tz
 	py3-ua-parser
-	py3-unidecode
+	py3-uritemplate
 	py3-urllib3-secure-extra
 	py3-uvloop
+	py3-vine
 	py3-watchdog
+	py3-watchfiles
+	py3-wcwidth
 	py3-webauthn
+	py3-websocket-client
 	py3-websockets
+	py3-wrapt
 	py3-wsproto
 	py3-xmlsec
 	py3-yaml
+	py3-yarl
+	py3-zope-interface
 	py3-zxcvbn
 	valkey
 	uvicorn
 	"
-makedepends="go npm py3-packaging"
+makedepends="go npm"
 checkdepends="
 	py3-pip
 	py3-coverage
-	py3-codespell
-	py3-colorama
 	py3-pytest
 	py3-pytest-django
 	py3-pytest-randomly
@@ -115,7 +168,6 @@ checkdepends="
 	py3-freezegun
 	py3-boto3
 	py3-requests-mock
-	py3-k5test
 	"
 install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install"
 source="
@@ -160,9 +212,6 @@ build() {
 	npm run build
 }
 
-# test failure neutralized due to:
-# relation authentik_core_user_pb_groups_id_seq does not exist
-
 check() {
 	msg "Setting up test environments"
 	export POSTGRES_DB=authentik
@@ -202,7 +251,7 @@ with open(\"local.env.yml\", \"w\") as _config:
 	pip install selenium drf_jsonschema_serializer pdoc --break-system-packages
 	msg "Starting tests"
-	make test || true
+	make test
 
 	# TODO: Fix go-tests
 	# make go-test
@@ -284,13 +333,13 @@ pyc() {
 }
 
 sha512sums="
-75928b3ab9ae126f3cbe88ff1256de8adba7add099b0d93615abb8c91a2b7f275e83664a232e8c5393c5031bd9757af2f20fdb9d0153dacdf9a482b6b4bb8b00  authentik-2025.2.4.tar.gz
+63548adc1ff93f603d133f1a23357ac1fedd975e790b81e1ad1ce17c7b32a58197c2fe49e6199362d3e90f873cd010b14b2e83b254f81b0198663657a2532e91  authentik-2024.8.4.tar.gz
 4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1  authentik.openrc
 6cb03b9b69df39bb4539fe05c966536314d766b2e9307a92d87070ba5f5b7e7ab70f1b5ee1ab3c0c50c23454f9c5a4caec29e63fdf411bbb7a124ad687569b89  authentik-worker.openrc
 351e6920d987861f8bf0d7ab2f942db716a8dbdad1f690ac662a6ef29ac0fd46cf817cf557de08f1c024703503d36bc8b46f0d9eb1ecaeb399dce4c3bb527d17  authentik-ldap.openrc
 89ee5f0ffdade1c153f3a56ff75b25a7104aa81d8c7a97802a8f4b0eab34850cee39f874dabe0f3c6da3f71d6a0f938f5e8904169e8cdd34d407c8984adee6b0  authentik-ldap.conf
 f1a3cb215b6210fa7d857a452a9f2bc4dc0520e49b9fa7027547cff093d740a7e2548f1bf1f8831f7d5ccb80c8e523ee0c8bafcc4dc42d2788725f2137d21bee  authentik-manage.sh
-3d38076606d18a438a2d76cdd2067774d5471bb832e641050630726b4d7bd8b8c2218d25d7e987a1fb46ee6a4a81d13e899145f015b3c94204cece039c7fb182  fix-ak-bash.patch
+3e47db684a3f353dcecdb7bab8836b9d5198766735d77f676a51d952141a0cf9903fcb92e6306c48d2522d7a1f3028b37247fdc1dc74d4d6e043da7eb4f36d49  fix-ak-bash.patch
 5c60e54b6a7829d611af66f5cb8184a002b5ae927efbd024c054a7c176fcb9efcfbe5685279ffcf0390b0f0abb3bb03e02782c6867c2b38d1ad2d508aae83fa0  root-settings-csrf_trusted_origins.patch
 badff70b19aad79cf16046bd46cb62db25c2a8b85b2673ce7c44c42eb60d42f6fcb1b9a7a7236c00f24803b25d3c66a4d64423f7ce14a59763b8415db292a5b9  go-downgrade-1.22.patch
 "
diff --git a/ilot/authentik/fix-ak-bash.patch b/ilot/authentik/fix-ak-bash.patch
index 080b29f..c6afafb 100644
--- a/ilot/authentik/fix-ak-bash.patch
+++ b/ilot/authentik/fix-ak-bash.patch
@@ -1,10 +1,10 @@
 diff --git a/lifecycle/ak.orig b/lifecycle/ak
-index 44dc480..49a0cef 100755
+index 615bfe9..1646274 100755
 --- a/lifecycle/ak.orig
 +++ b/lifecycle/ak
 @@ -1,4 +1,4 @@
--#!/usr/bin/env -S bash
+-#!/usr/bin/env -S bash -e
 +#!/usr/bin/env bash
- set -e -o pipefail
 
 MODE_FILE="${TMPDIR}/authentik-mode"
+ function log {
diff --git a/ilot/codeberg-pages-server/APKBUILD b/ilot/codeberg-pages-server/APKBUILD
deleted file mode 100644
index 359c118..0000000
--- a/ilot/codeberg-pages-server/APKBUILD
+++ /dev/null
@@ -1,49 +0,0 @@
-# Contributor: Antoine Martin (ayakael)
-# Maintainer: Antoine Martin (ayakael)
-pkgname=codeberg-pages-server
-pkgver=6.2.1
-pkgrel=1
-pkgdesc="The Codeberg Pages Server – with custom domain support, per-repo pages using the pages branch, caching and more."
-url="https://codeberg.org/Codeberg/pages-server"
-arch="all"
-license="EUPL-1.2"
-depends="libcap-setcap nginx"
-makedepends="go just"
-install="$pkgname.post-install"
-# tests disabled for now
-options="!check"
-source="
-	$pkgname-$pkgver.tar.gz::https://codeberg.org/Codeberg/pages-server/archive/v$pkgver.tar.gz
-	codeberg-pages-server.openrc
-	downgrade-go.patch
-	"
-builddir="$srcdir/"pages-server
-subpackages="$pkgname-openrc"
-pkgusers="git"
-pkggroups="www-data"
-
-export GOPATH=$srcdir/go
-export GOCACHE=$srcdir/go-build
-export GOTMPDIR=$srcdir
-
-build() {
-	just build
-}
-
-package() {
-	msg "Packaging $pkgname"
-	install -Dm755 "$builddir"/build/codeberg-pages-server \
-		"$pkgdir"/usr/bin/codeberg-pages-server
-
-	install -Dm755 "$srcdir"/$pkgname.openrc \
-		"$pkgdir"/etc/init.d/$pkgname
-
-	install -Dm600 "$builddir"/example_config.toml \
-		"$pkgdir"/etc/codeberg-pages-server/pages.conf
-}
-
-sha512sums="
-87992a244a580ef109fa891fd4e4ab5bf8320076f396c63e23b83e2c49e3c34fed2d6562283fc57dd89ebc13596dd7b8cbdfa7202eee43cbbd86b6a7f3b52c26  codeberg-pages-server-6.2.1.tar.gz
-4808057de5d539fd9ad3db67b650d45ed60c53e07eff840115af09729ac198791b465b61da547eac1dffd0633e5855c348aa7663d6f6cb5984f7fc999be08589  codeberg-pages-server.openrc
-1f02e3e9a6f0aab9b516fa7ffaaeb92da3ab839fbcf07f672398063d784c8c0ca373edc0f9a26132d40a60345c4894a5f757c13bf7500f5753f5ffcdf10c52db  downgrade-go.patch
-"
diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.openrc b/ilot/codeberg-pages-server/codeberg-pages-server.openrc
deleted file mode 100644
index fe2ce85..0000000
--- a/ilot/codeberg-pages-server/codeberg-pages-server.openrc
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/sbin/openrc-run
-
-: ${config:=/etc/codeberg-pages-server/pages.conf}
-
-name="$RC_SVCNAME"
-cfgfile="/etc/conf.d/$RC_SVCNAME.conf"
-pidfile="/run/$RC_SVCNAME.pid"
-working_directory="/var/lib/codeberg-pages-server"
-command="/usr/bin/codeberg-pages-server"
-command_args="--config-file $config"
-command_user="nginx"
-command_group="nginx"
-start_stop_daemon_args=""
-command_background="yes"
-output_log="/var/log/codeberg-pages-server/$RC_SVCNAME.log"
-error_log="/var/log/codeberg-pages-server/$RC_SVCNAME.err"
-
-start_pre() {
-	checkpath --directory --owner $command_user:$command_group --mode 0775 \
-		/var/log/codeberg-pages-server \
-		/var/lib/codeberg-pages-server
-	cd "$working_directory"
-}
diff --git a/ilot/codeberg-pages-server/codeberg-pages-server.post-install b/ilot/codeberg-pages-server/codeberg-pages-server.post-install
deleted file mode 100755
index d55e213..0000000
--- a/ilot/codeberg-pages-server/codeberg-pages-server.post-install
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-set -eu
-
-setcap 'cap_net_bind_service=+ep' /usr/bin/codeberg-pages-server
-
-cat >&2 <<-EOF
-*
-* 1. Adjust settings in /etc/codeberg-pages-server/pages.conf
-*
-EOF
diff --git a/ilot/codeberg-pages-server/downgrade-go.patch b/ilot/codeberg-pages-server/downgrade-go.patch
deleted file mode 100644
index 80988d5..0000000
--- a/ilot/codeberg-pages-server/downgrade-go.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/go.mod.orig b/go.mod
-index bff6b77..2b9f2e4 100644
---- a/go.mod.orig
-+++ b/go.mod
-@@ -1,6 +1,6 @@
- module codeberg.org/codeberg/pages
- 
--go 1.24.0
-+go 1.23.6
- 
- require (
- 	code.gitea.io/sdk/gitea v0.20.0
diff --git a/ilot/forgejo-aneksajo/APKBUILD b/ilot/forgejo-aneksajo/APKBUILD
index d3a4462..332456f 100644
--- a/ilot/forgejo-aneksajo/APKBUILD
+++ b/ilot/forgejo-aneksajo/APKBUILD
@@ -4,14 +4,14 @@
 # Contributor: Patrycja Rosa
 # Maintainer: Antoine Martin (ayakael)
 pkgname=forgejo-aneksajo
-pkgver=11.0.0_git0
+pkgver=8.0.3_git2
 _gittag=v${pkgver/_git/-git-annex}
 pkgrel=0
 pkgdesc="Self-hosted Git service written in Go with git-annex support"
 url="https://forgejo.org"
 # riscv64: builds fail https://codeberg.org/forgejo/forgejo/issues/3025
 arch="all !riscv64"
-license="GPL-3.0-or-later"
+license="MIT"
 depends="git git-lfs gnupg"
 makedepends="go nodejs npm"
 checkdepends="bash openssh openssh-keygen sqlite tzdata"
@@ -60,7 +60,7 @@ build() {
 	export CGO_LDFLAGS="$LDFLAGS"
 	unset LDFLAGS
 	## make FHS compliant
-	local setting="forgejo.org/modules/setting"
+	local setting="code.gitea.io/gitea/modules/setting"
 	export LDFLAGS="$LDFLAGS -X $setting.CustomConf=/etc/forgejo/app.ini"
 	export LDFLAGS="$LDFLAGS -X $setting.AppWorkPath=/var/lib/forgejo/"
 
@@ -106,7 +106,7 @@ package() {
 }
 
 sha512sums="
-07f72fcd3bb02a6bbfbcf73f8526c51f1f3fe39d2a504395dfb0997743a190bd210389d58114aaf546fb6d0fabaa80a54240632e11eeba35250b9e6b9b63f438  forgejo-aneksajo-v11.0.0-git-annex0.tar.gz
-497d8575f2eb5ac43baf82452e76007ef85e22cca2cc769f1cf55ffd03d7ce4d50ac4dc2b013e23086b7a5577fc6de5a4c7e5ec7c287f0e3528e908aaa2982aa  forgejo-aneksajo.initd
+65aaf0eacee6fb87d5298e2398448b1b8023b02a6e7f2a37b8d3c92a449c62e0147f33e2fcc4a066ee579c8d05ff8dcfda663e39c80658e2f3a6b0a24dfe2f84  forgejo-aneksajo-v8.0.3-git-annex2.tar.gz
+eb93a9f6c8f204de5c813f58727015f53f9feaab546589e016c60743131559f04fc1518f487b6d2a0e7fa8fab6d4a67cd0cd9713a7ccd9dec767a8c1ddebe129  forgejo-aneksajo.initd
 b537b41b6b3a945274a6028800f39787b48c318425a37cf5d40ace0d1b305444fd07f17b4acafcd31a629bedd7d008b0bb3e30f82ffeb3d7e7e947bdbe0ff4f3  forgejo-aneksajo.ini
 "
diff --git a/ilot/forgejo-aneksajo/forgejo-aneksajo.initd b/ilot/forgejo-aneksajo/forgejo-aneksajo.initd
index ecdcde1..24dd085 100644
--- a/ilot/forgejo-aneksajo/forgejo-aneksajo.initd
+++ b/ilot/forgejo-aneksajo/forgejo-aneksajo.initd
@@ -1,24 +1,15 @@
 #!/sbin/openrc-run
 
-: ${command_user:="forgejo:www-data"}
-: ${cfgfile:="/etc/forgejo/app.ini"}
-: ${directory:="/var/lib/forgejo"}
-: ${output_log="/var/log/forgejo/http.log"}
-: ${error_log="/var/log/forgejo/http.log"}
-: ${supervisor="supervise-daemon"}
-
+supervisor=supervise-daemon
 name=forgejo
 command="/usr/bin/forgejo"
-command_args="web --config '$cfgfile' $command_args"
-command_background="yes"
-pidfile="/run/$RC_SVCNAME.pid"
-
-required_files="$cfgfile"
-
-export FORGEJO_WORK_DIR="$directory"
+command_user="${FORGEJO_USER:-forgejo}:www-data"
+command_args="web --config '${FORGEJO_CONF:-/etc/forgejo/app.ini}'"
+supervise_daemon_args="--env FORGEJO_WORK_DIR='${FORGEJO_WORK_DIR:-/var/lib/forgejo}' --chdir '${FORGEJO_WORK_DIR:-/var/lib/forgejo}' --stdout '${FORGEJO_LOG_FILE:-/var/log/forgejo/http.log}' --stderr '${FORGEJO_LOG_FILE:-/var/log/forgejo/http.log}'"
+pidfile="/run/forgejo.pid"
 
 depend() {
-	use logger dns
-	need net
-	after firewall mysql postgresql
+	use logger dns
+	need net
+	after firewall mysql postgresql
 }
diff --git a/ilot/freescout/APKBUILD b/ilot/freescout/APKBUILD
index 5f4eb2d..bac7fcc 100644
--- a/ilot/freescout/APKBUILD
+++ b/ilot/freescout/APKBUILD
@@ -1,7 +1,7 @@
 # Maintainer: Antoine Martin (ayakael)
 # Contributor: Antoine Martin (ayakael)
 pkgname=freescout
-pkgver=1.8.175
+pkgver=1.8.152
 pkgrel=0
 pkgdesc="Free self-hosted help desk & shared mailbox"
 arch="noarch"
@@ -9,7 +9,7 @@ url="freescout.net"
 license="AGPL-3.0"
 _php=php83
 _php_mods="-fpm -mbstring -xml -imap -zip -gd -curl -intl -tokenizer -pdo_pgsql -openssl -session -iconv -fileinfo -dom -pcntl"
-depends="$_php ${_php_mods//-/$_php-} nginx postgresql pwgen bash"
+depends="$_php ${_php_mods//-/$_php-} nginx postgresql pwgen"
 makedepends="composer pcre"
 install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install"
 source="
@@ -17,7 +17,6 @@ source="
 	freescout.nginx
 	freescout-manage.sh
 	rename-client-to-membre-fr-en.patch
-	fix-laravel-log-viewer.patch
 	"
 pkgusers="freescout"
 pkggroups="freescout"
@@ -76,9 +75,8 @@ package() {
 	install -m755 -D "$srcdir"/freescout-manage.sh "$pkgdir"/usr/bin/freescout-manage
 }
 sha512sums="
-aa5f762eddaac34977a42bb59a0c2ec2113b0ad4f04b767465e9c23c4bb5d0dd722432735fb10975c23b0a5ca4a11abcfc52d893a3c6678d4908ceb29cefa736  freescout-1.8.175.tar.gz
+0e4d6d4a1aaeba2d39db8678e436f3c46c1a1fd79ea2b37c9ac95cbb319306b818991981987f6ac7dc8100a084d4189fa12f7639b24e2744705fa409ac349864  freescout-1.8.152.tar.gz
 e4af6c85dc12f694bef2a02e4664e31ed50b2c109914d7ffad5001c2bbd764ef25b17ecaa59ff55ef41bccf17169bf910d1a08888364bdedd0ecc54d310e661f  freescout.nginx
 7ce9b3ee3a979db44f5e6d7daa69431e04a5281f364ae7be23e5a0a0547f96abc858d2a8010346be2fb99bd2355fb529e7030ed20d54f310249e61ed5db4d0ba  freescout-manage.sh
-0cba00b7d945ce84f72a2812d40028a073a5278856f610e46dbfe0ac78deff6bf5eba7643635fa4bc64d070c4d49eb47d24ea0a05ba1e6ea76690bfd77906366  rename-client-to-membre-fr-en.patch
-2c651db6adac6d53597ba36965d0c65e005293f9b030e6be167853e4089384920524737aa947c5066877ee8caefb46741ccba797f653e7c2678556063540d261  fix-laravel-log-viewer.patch
+3416da98d71aea5a7093913ea34e783e21ff05dca90bdc5ff3d00c548db5889f6d0ec98441cd65ab9f590be5cd59fdd0d7f1c98b5deef7bb3adbc8db435ec9bf  rename-client-to-membre-fr-en.patch
 "
diff --git a/ilot/freescout/fix-laravel-log-viewer.patch b/ilot/freescout/fix-laravel-log-viewer.patch
deleted file mode 100644
index 8f29a36..0000000
--- a/ilot/freescout/fix-laravel-log-viewer.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/vendor/composer/installed.json.orig b/vendor/composer/installed.json
-index 0b826f5..9d14ec8 100644
---- a/vendor/composer/installed.json.orig
-+++ b/vendor/composer/installed.json
-@@ -4494,7 +4494,7 @@
-             "installation-source": "dist",
-             "autoload": {
-                 "classmap": [
--                    "src/controllers"
-+                    "src/"
-                 ],
-                 "psr-0": {
-                     "Rap2hpoutre\\LaravelLogViewer\\": "src/"
diff --git a/ilot/freescout/rename-client-to-membre-fr-en.patch b/ilot/freescout/rename-client-to-membre-fr-en.patch
index 90e75b8..097e503 100644
--- a/ilot/freescout/rename-client-to-membre-fr-en.patch
+++ b/ilot/freescout/rename-client-to-membre-fr-en.patch
@@ -38,7 +38,7 @@ index 00000000..82d26052
 +}
 \ No newline at end of file
 diff --git a/resources/lang/fr.json.orig b/resources/lang/fr.json
-index 6264973..8a7037e 100644
+index ff8d9d4..98d158f 100644
 --- a/resources/lang/fr.json.orig
 +++ b/resources/lang/fr.json
 @@ -26,8 +26,8 @@
@@ -201,8 +201,8 @@ index 6264973..8a7037e 100644
 - "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les clients. Il est uniquement utilisé pour suivre les conversations dans :app_name",
 + "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les membres. Il est uniquement utilisé pour suivre les conversations dans :app_name",
  "This password is incorrect.": "Ce mot de passe est incorrect.",
-- "This reply will go to the customer. :%switch_start%Switch to a note:%switch_end% if you are replying to :user_name.": "Cette réponse ira au client. :%switch_start%Passez à une note:%switch_end% si vous répondez à :user_name.",
-+ "This reply will go to the customer. :%switch_start%Switch to a note:%switch_end% if you are replying to :user_name.": "Cette réponse ira au membre. :%switch_start%Passez à une note:%switch_end% si vous répondez à :user_name.",
+- "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au client. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.",
++ "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au membre. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.",
  "This setting gives you control over what page loads after you perform an action (send a reply, add a note, change conversation status or assignee).": "Ce paramètre vous permet de contrôler la page qui se charge après avoir effectué une action (envoyer une réponse, ajouter une note, etc.).",
 - "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un client.",
 + "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un membre.",
diff --git a/ilot/go/0001-cmd-link-prefer-musl-s-over-glibc-s-ld.so-during-dyn.patch b/ilot/go/0001-cmd-link-prefer-musl-s-over-glibc-s-ld.so-during-dyn.patch
deleted file mode 100644
index 2cbbcd9..0000000
--- a/ilot/go/0001-cmd-link-prefer-musl-s-over-glibc-s-ld.so-during-dyn.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From fa8e52baedd21265f69b5f425157e11c8c4ec24a Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?S=C3=B6ren=20Tempel?=
-Date: Sat, 25 Mar 2023 09:08:04 +0100
-Subject: [PATCH] cmd/link: prefer musl's over glibc's ld.so during dynamic
- linking
-
-Without this commit glibc's is preferred over musl by default. This
-causes issues on Alpine when a dynamically linked Go binary is created
-while gcompat is installed, causing the binary to be linked against
-the ld.so provided by the gcompat package.
-
-This commit changes the logic to check for musl's ld.so first, if it
-does not exist we fallback to glibc. This default can be overwritten
-using the `-I` option of cmd/link.
- -See https://gitlab.alpinelinux.org/alpine/aports/-/issues/14737 ---- - src/cmd/link/internal/ld/elf.go | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go -index 713f7739a5..8cf9377858 100644 ---- a/src/cmd/link/internal/ld/elf.go -+++ b/src/cmd/link/internal/ld/elf.go -@@ -1886,14 +1886,14 @@ func asmbElf(ctxt *Link) { - Exitf("ELF interpreter not set") - } - } else { -- interpreter = thearch.ELF.Linuxdynld -- // If interpreter does not exist, try musl instead. -+ interpreter = thearch.ELF.LinuxdynldMusl -+ // If interpreter does not exist, try glibc instead. - // This lets the same cmd/link binary work on -- // both glibc-based and musl-based systems. -+ // both musl-based and glibc-based systems. - if _, err := os.Stat(interpreter); err != nil { -- if musl := thearch.ELF.LinuxdynldMusl; musl != "" { -- if _, err := os.Stat(musl); err == nil { -- interpreter = musl -+ if glibc := thearch.ELF.Linuxdynld; glibc != "" { -+ if _, err := os.Stat(glibc); err == nil { -+ interpreter = glibc - } - } - } diff --git a/ilot/go/0002-go.env-Don-t-switch-Go-toolchain-version-as-directed.patch b/ilot/go/0002-go.env-Don-t-switch-Go-toolchain-version-as-directed.patch deleted file mode 100644 index db82330..0000000 --- a/ilot/go/0002-go.env-Don-t-switch-Go-toolchain-version-as-directed.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 82ac7268f746c31d771e584c1c83f93890b33404 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?S=C3=B6ren=20Tempel?= -Date: Tue, 11 Jul 2023 05:18:00 +0200 -Subject: [PATCH] go.env: Don't switch Go toolchain version as directed in - go.mod - -We want users and packages to use the version of Go that is provided -in our package repository. We don't want to download pre-built -toolchains from golang.org. - -Also note that prior to Go 1.21, pre-built Go binaries are linked -against glibc and hence do not work on Alpine. ---- - go.env | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/go.env b/go.env -index 6ff2b921d4..a106fb4638 100644 ---- a/go.env -+++ b/go.env -@@ -7,6 +7,5 @@ - GOPROXY=https://proxy.golang.org,direct - GOSUMDB=sum.golang.org - --# Automatically download newer toolchains as directed by go.mod files. --# See https://go.dev/doc/toolchain for details. --GOTOOLCHAIN=auto -+# Don't attempt to switch to a newer toolchains by default. -+GOTOOLCHAIN=local diff --git a/ilot/go/0003-runtime-cleanup-M-vgetrandom-state-before-dropping-P.patch b/ilot/go/0003-runtime-cleanup-M-vgetrandom-state-before-dropping-P.patch deleted file mode 100644 index 2e02033..0000000 --- a/ilot/go/0003-runtime-cleanup-M-vgetrandom-state-before-dropping-P.patch +++ /dev/null @@ -1,245 +0,0 @@ -From 5c5b24702f5542fba019d6b98eec6121bc21df31 Mon Sep 17 00:00:00 2001 -From: Michael Pratt -Date: Thu, 3 Apr 2025 11:15:13 +0000 -Subject: [PATCH] runtime: cleanup M vgetrandom state before dropping P - -When an M is destroyed, we put its vgetrandom state back on the shared -list for another M to reuse. This list is simply a slice, so appending -to the slice may allocate. Currently this operation is performed in -mdestroy, after the P is released, meaning allocation is not allowed. - -More the cleanup earlier in mdestroy when allocation is still OK. - -Also add //go:nowritebarrierrec to mdestroy since it runs without a P, -which would have caught this bug. - -Fixes #73141. 
- -Change-Id: I6a6a636c3fbf5c6eec09d07a260e39dbb4d2db12 -Reviewed-on: https://go-review.googlesource.com/c/go/+/662455 -Reviewed-by: Jason Donenfeld -LUCI-TryBot-Result: Go LUCI -Reviewed-by: Keith Randall -Reviewed-by: Keith Randall ---- - src/runtime/os3_solaris.go | 5 ++++- - src/runtime/os_aix.go | 5 ++++- - src/runtime/os_darwin.go | 5 ++++- - src/runtime/os_dragonfly.go | 5 ++++- - src/runtime/os_linux.go | 9 ++++----- - src/runtime/os_netbsd.go | 5 ++++- - src/runtime/os_openbsd.go | 5 ++++- - src/runtime/os_plan9.go | 5 ++++- - src/runtime/os_windows.go | 4 +++- - src/runtime/proc.go | 3 +++ - src/runtime/vgetrandom_linux.go | 11 +++++++++-- - src/runtime/vgetrandom_unsupported.go | 2 +- - 12 files changed, 48 insertions(+), 16 deletions(-) - -diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go -index cf163a6bf4..ded821b2e6 100644 ---- a/src/runtime/os3_solaris.go -+++ b/src/runtime/os3_solaris.go -@@ -234,8 +234,11 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go -index 93464cb997..1b483c2a7e 100644 ---- a/src/runtime/os_aix.go -+++ b/src/runtime/os_aix.go -@@ -186,8 +186,11 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go -index 0ecbea7ae4..6eab3b5c3d 100644 ---- a/src/runtime/os_darwin.go -+++ b/src/runtime/os_darwin.go -@@ -344,8 +344,11 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go -index a02696eb4f..9b3235084d 100644 ---- a/src/runtime/os_dragonfly.go -+++ b/src/runtime/os_dragonfly.go -@@ -216,8 +216,11 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. 
-+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go -index 8b3c4d0ecc..fb46b81682 100644 ---- a/src/runtime/os_linux.go -+++ b/src/runtime/os_linux.go -@@ -412,13 +412,12 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { -- if mp.vgetrandomState != 0 { -- vgetrandomPutState(mp.vgetrandomState) -- mp.vgetrandomState = 0 -- } - } - - // #ifdef GOARCH_386 -diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go -index 735ace25ad..a06e5febbd 100644 ---- a/src/runtime/os_netbsd.go -+++ b/src/runtime/os_netbsd.go -@@ -320,8 +320,11 @@ func unminit() { - // must continue working after unminit. - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go -index 574bfa8b17..4ce4c3c58d 100644 ---- a/src/runtime/os_openbsd.go -+++ b/src/runtime/os_openbsd.go -@@ -182,8 +182,11 @@ func unminit() { - getg().m.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go -index 2dbb42ad03..3b5965ab99 100644 ---- a/src/runtime/os_plan9.go -+++ b/src/runtime/os_plan9.go -@@ -217,8 +217,11 @@ func minit() { - func unminit() { - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. -+// -+// This always runs without a P, so //go:nowritebarrierrec is required. -+//go:nowritebarrierrec - func mdestroy(mp *m) { - } - -diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go -index 7183e79f7d..54407a320c 100644 ---- a/src/runtime/os_windows.go -+++ b/src/runtime/os_windows.go -@@ -906,9 +906,11 @@ func unminit() { - mp.procid = 0 - } - --// Called from exitm, but not from drop, to undo the effect of thread-owned -+// Called from mexit, but not from dropm, to undo the effect of thread-owned - // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. - // -+// This always runs without a P, so //go:nowritebarrierrec is required. 
-+//go:nowritebarrierrec - //go:nosplit - func mdestroy(mp *m) { - if mp.highResTimer != 0 { -diff --git a/src/runtime/proc.go b/src/runtime/proc.go -index e9873e54cd..21bee4df71 100644 ---- a/src/runtime/proc.go -+++ b/src/runtime/proc.go -@@ -1935,6 +1935,9 @@ func mexit(osStack bool) { - mp.gsignal = nil - } - -+ // Free vgetrandom state. -+ vgetrandomDestroy(mp) -+ - // Remove m from allm. - lock(&sched.lock) - for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { -diff --git a/src/runtime/vgetrandom_linux.go b/src/runtime/vgetrandom_linux.go -index a6ec4b701c..40be022f24 100644 ---- a/src/runtime/vgetrandom_linux.go -+++ b/src/runtime/vgetrandom_linux.go -@@ -73,9 +73,16 @@ func vgetrandomGetState() uintptr { - return state - } - --func vgetrandomPutState(state uintptr) { -+// Free vgetrandom state from the M (if any) prior to destroying the M. -+// -+// This may allocate, so it must have a P. -+func vgetrandomDestroy(mp *m) { -+ if mp.vgetrandomState == 0 { -+ return -+ } -+ - lock(&vgetrandomAlloc.statesLock) -- vgetrandomAlloc.states = append(vgetrandomAlloc.states, state) -+ vgetrandomAlloc.states = append(vgetrandomAlloc.states, mp.vgetrandomState) - unlock(&vgetrandomAlloc.statesLock) - } - -diff --git a/src/runtime/vgetrandom_unsupported.go b/src/runtime/vgetrandom_unsupported.go -index 070392cfaa..43c53e1198 100644 ---- a/src/runtime/vgetrandom_unsupported.go -+++ b/src/runtime/vgetrandom_unsupported.go -@@ -13,6 +13,6 @@ func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { - return -1, false - } - --func vgetrandomPutState(state uintptr) {} -+func vgetrandomDestroy(mp *m) {} - - func vgetrandomInit() {} diff --git a/ilot/go/APKBUILD b/ilot/go/APKBUILD deleted file mode 100644 index 82a98ba..0000000 --- a/ilot/go/APKBUILD +++ /dev/null @@ -1,318 +0,0 @@ -# Contributor: Sören Tempel -# Contributor: Eivind Uggedal -# Maintainer: Sören Tempel -pkgname=go -# go binaries are statically linked, security updates require rebuilds -pkgver=1.24.2 -pkgrel=1 -pkgdesc="Go programming language compiler" -url="https://go.dev/" -arch="all" -license="BSD-3-Clause" -depends="binutils gcc musl-dev" -makedepends="bash" -options="!check" -checkdepends="binutils-gold git git-daemon" -subpackages="$pkgname-doc" -source="https://go.dev/dl/go$pkgver.src.tar.gz - 0001-cmd-link-prefer-musl-s-over-glibc-s-ld.so-during-dyn.patch - 0002-go.env-Don-t-switch-Go-toolchain-version-as-directed.patch - 0003-runtime-cleanup-M-vgetrandom-state-before-dropping-P.patch - tests-fchmodat-not-supported.patch - " -case "$CARCH" in - arm*|aarch64) depends="$depends binutils-gold";; - riscv64|loongarch64) - # binutils-gold is not supported on riscv64 and loongarch64. 
-		checkdepends="${checkdepends/binutils-gold/}"
-		;;
-esac
-
-# secfixes:
-#   0:
-#     - CVE-2022-41716
-#     - CVE-2022-41720
-#     - CVE-2022-41722
-#     - CVE-2024-24787
-#   1.24.2-r0:
-#     - CVE-2025-22871
-#   1.24.1-r0:
-#     - CVE-2025-22870
-#   1.23.6-r0:
-#     - CVE-2025-22866
-#   1.23.5-r0:
-#     - CVE-2024-45336
-#     - CVE-2024-45341
-#   1.23.1-r0:
-#     - CVE-2024-34155
-#     - CVE-2024-34156
-#     - CVE-2024-34158
-#   1.22.5-r0:
-#     - CVE-2024-24791
-#   1.22.4-r0:
-#     - CVE-2024-24789
-#     - CVE-2024-24790
-#   1.22.3-r0:
-#     - CVE-2024-24788
-#   1.22.2-r0:
-#     - CVE-2023-45288
-#   1.22.1-r0:
-#     - CVE-2024-24783
-#     - CVE-2023-45290
-#     - CVE-2023-45289
-#     - CVE-2024-24785
-#     - CVE-2024-24784
-#   1.21.5-r0:
-#     - CVE-2023-39324
-#     - CVE-2023-39326
-#   1.21.3-r0:
-#     - CVE-2023-39325
-#     - CVE-2023-44487
-#   1.21.2-r0:
-#     - CVE-2023-39323
-#   1.21.1-r0:
-#     - CVE-2023-39318
-#     - CVE-2023-39319
-#     - CVE-2023-39320
-#     - CVE-2023-39321
-#     - CVE-2023-39322
-#   1.20.7-r0:
-#     - CVE-2023-29409
-#   1.20.6-r0:
-#     - CVE-2023-29406
-#   1.20.5-r0:
-#     - CVE-2023-29402
-#     - CVE-2023-29403
-#     - CVE-2023-29404
-#     - CVE-2023-29405
-#   1.20.4-r0:
-#     - CVE-2023-24539
-#     - CVE-2023-24540
-#     - CVE-2023-29400
-#   1.20.3-r0:
-#     - CVE-2023-24537
-#     - CVE-2023-24538
-#     - CVE-2023-24534
-#     - CVE-2023-24536
-#   1.20.2-r0:
-#     - CVE-2023-24532
-#   1.20.1-r0:
-#     - CVE-2022-41725
-#     - CVE-2022-41724
-#     - CVE-2022-41723
-#   1.19.4-r0:
-#     - CVE-2022-41717
-#   1.19.2-r0:
-#     - CVE-2022-2879
-#     - CVE-2022-2880
-#     - CVE-2022-41715
-#   1.19.1-r0:
-#     - CVE-2022-27664
-#     - CVE-2022-32190
-#   1.18.5-r0:
-#     - CVE-2022-32189
-#   1.18.4-r0:
-#     - CVE-2022-1705
-#     - CVE-2022-1962
-#     - CVE-2022-28131
-#     - CVE-2022-30630
-#     - CVE-2022-30631
-#     - CVE-2022-30632
-#     - CVE-2022-30633
-#     - CVE-2022-30635
-#     - CVE-2022-32148
-#   1.18.1-r0:
-#     - CVE-2022-28327
-#     - CVE-2022-27536
-#     - CVE-2022-24675
-#   1.17.8-r0:
-#     - CVE-2022-24921
-#   1.17.7-r0:
-#     - CVE-2022-23772
-#     - CVE-2022-23773
-#     - CVE-2022-23806
-#   1.17.6-r0:
-#     - CVE-2021-44716
-#     - CVE-2021-44717
-#   1.17.3-r0:
-#     - CVE-2021-41772
-#     - CVE-2021-41771
-#   1.17.2-r0:
-#     - CVE-2021-38297
-#   1.17.1-r0:
-#     - CVE-2021-39293
-#   1.17-r0:
-#     - CVE-2020-29509
-#     - CVE-2020-29511
-#     - CVE-2021-29923
-#   1.16.7-r0:
-#     - CVE-2021-36221
-#   1.16.6-r0:
-#     - CVE-2021-34558
-#   1.16.5-r0:
-#     - CVE-2021-33195
-#     - CVE-2021-33196
-#     - CVE-2021-33197
-#     - CVE-2021-33198
-#   1.16.4-r0:
-#     - CVE-2021-31525
-#   1.16.2-r0:
-#     - CVE-2021-27918
-#     - CVE-2021-27919
-#   1.15.7-r0:
-#     - CVE-2021-3114
-#     - CVE-2021-3115
-#   1.15.5-r0:
-#     - CVE-2020-28362
-#     - CVE-2020-28366
-#     - CVE-2020-28367
-#   1.15.2-r0:
-#     - CVE-2020-24553
-#   1.15-r0:
-#     - CVE-2020-16845
-#   1.14.5-r0:
-#     - CVE-2020-15586
-#   1.13.7-r0:
-#     - CVE-2020-7919
-#   1.13.2-r0:
-#     - CVE-2019-17596
-#   1.13.1-r0:
-#     - CVE-2019-16276
-#   1.12.8-r0:
-#     - CVE-2019-9512
-#     - CVE-2019-9514
-#     - CVE-2019-14809
-#   1.11.5-r0:
-#     - CVE-2019-6486
-#   1.9.4-r0:
-#     - CVE-2018-6574
-
-if [ "$CBUILD" = "$CTARGET" ]; then
-	makedepends="go-bootstrap $makedepends"
-	provides="go-bootstrap=$pkgver-r$pkgrel"
-else
-	pkgname="go-bootstrap"
-	makedepends="go $makedepends"
-	# Go expect host linker instead of the cross-compiler
-	export CC_FOR_TARGET="$CC"
-	export CC="${HOSTLD:-gcc}"
-	export CXX="${HOSTLD:-g++}"
-	export LD="${HOSTLD:-ld}"
-fi
-
-case "$CTARGET_ARCH" in
-aarch64) export GOARCH="arm64" ;;
-armel) export GOARCH="arm" GOARM=5 ;;
-armhf) export GOARCH="arm" GOARM=6 ;;
-armv7) export GOARCH="arm" GOARM=7 ;;
-s390x) export GOARCH="s390x" ;;
-x86) export GOARCH="386" ;;
-x86_64) export GOARCH="amd64" ;;
-ppc64) export GOARCH="ppc64" ;;
-ppc64le) export GOARCH="ppc64le" ;;
-riscv64) export GOARCH="riscv64" ;;
-loongarch64) export GOARCH="loong64" ;;
-*) export GOARCH="unsupported";;
-esac
-
-# compile go itself as a PIE on supported arches.
-case "$CARCH" in
-x86_64|s390x|aarch64) export GO_LDFLAGS=-buildmode=pie ;;
-esac
-
-prepare() {
-	default_prepare
-
-	# The GitLab CI builds aports in a container. On ppc64le, ASLR
-	# needs to be disabled in order to have the following test case
-	# pass. However, the container doesn't have permissions to
-	# disable ASLR, hence we just disable this test for now.
-	#
-	# See https://github.com/golang/go/issues/49066#issuecomment-1252948861
-	if [ "$CTARGET_ARCH" = "ppc64le" ]; then
-		rm test/fixedbugs/bug513.go
-	fi
-}
-
-builddir="$srcdir"/go
-build() {
-	cd "$builddir/src"
-
-	export GOOS="linux"
-	export GOPATH="$srcdir"
-	export GOROOT="$builddir"
-	export GOBIN="$GOROOT"/bin
-	export GOROOT_FINAL=/usr/lib/go
-
-	local p; for p in /usr/lib/go-bootstrap /usr/lib/go-linux-$GOARCH-bootstrap /usr/lib/go; do
-		if [ -d "$p" ]; then
-			export GOROOT_BOOTSTRAP="$p"
-			break
-		fi
-	done
-
-	./make.bash -v
-
-	# copied from bootstrap.bash to fixup cross-built bootstrap go
-	if [ "$CBUILD" != "$CTARGET" ]; then
-		local gohostos="$(../bin/go env GOHOSTOS)"
-		local gohostarch="$(../bin/go env GOHOSTARCH)"
-		mv ../bin/*_*/* ../bin
-		rmdir ../bin/*_*
-		rm -rf "../pkg/${gohostos}_$gohostarch"* "../pkg/tool/${gohostos}_$gohostarch"*
-		rm -rf ../pkg/bootstrap ../pkg/obj
-	fi
-}
-
-check() {
-	cd "$builddir/src"
-	if [ "$CTARGET_ARCH" = "armhf" ]; then
-		export GO_TEST_TIMEOUT_SCALE=2
-	fi
-
-	# Test suite does not pass with ccache, thus remove it form $PATH.
-	export PATH="$(echo "$PATH" | sed 's|/usr/lib/ccache/bin:||g')"
-
-	PATH="$builddir/bin:$PATH" ./run.bash -no-rebuild
-}
-
-package() {
-	mkdir -p "$pkgdir"/usr/bin "$pkgdir"/usr/lib/go/bin "$pkgdir"/usr/share/doc/go
-
-	for binary in go gofmt; do
-		install -Dm755 bin/"$binary" "$pkgdir"/usr/lib/go/bin/"$binary"
-		ln -s /usr/lib/go/bin/"$binary" "$pkgdir"/usr/bin/
-	done
-
-	cp -a misc pkg src lib "$pkgdir"/usr/lib/go
-	cp -r doc "$pkgdir"/usr/share/doc/go
-	rm -rf "$pkgdir"/usr/lib/go/pkg/obj
-	rm -rf "$pkgdir"/usr/lib/go/pkg/bootstrap
-	rm -f "$pkgdir"/usr/lib/go/pkg/tool/*/api
-
-	# Install go.env, see https://go.dev/doc/toolchain#GOTOOLCHAIN.
-	install -Dm644 "$builddir"/go.env "$pkgdir"/usr/lib/go/go.env
-	install -Dm644 VERSION "$pkgdir/usr/lib/go/VERSION"
-
-	# Remove tests from /usr/lib/go/src to reduce package size,
-	# these should not be needed at run-time by any program.
-	find "$pkgdir"/usr/lib/go/src \( -type f -a -name "*_test.go" \) \
-		-exec rm -rf \{\} \+
-	find "$pkgdir"/usr/lib/go/src \( -type d -a -name "testdata" \) \
-		-exec rm -rf \{\} \+
-
-	# Remove rc (plan 9) and bat scripts (windows) to reduce package
-	# size further. The bash scripts are actually needed at run-time.
-	#
-	# See: https://gitlab.alpinelinux.org/alpine/aports/issues/11091
-	find "$pkgdir"/usr/lib/go/src -type f -a \( -name "*.rc" -o -name "*.bat" \) \
-		-exec rm -rf \{\} \+
-}
-
-sha512sums="
-6366a32f6678e7908b138f62dafeed96f7144b3b93505e75fba374b33727da8b1d087c1f979f493382b319758ebfcbeb30e9d7dadcb2923b628c8abe7db41c6f  go1.24.2.src.tar.gz
-34dbe032c5f08dd8a7aad36fc4d54e746a876fdadc25466888a2f04f5a9d53103190ebd68d3cf978d3a041976185e30ffb25611fb577d031c159810d2d4c7c41  0001-cmd-link-prefer-musl-s-over-glibc-s-ld.so-during-dyn.patch
-8061e4ef9d7dd31804bd8d98c95afa5dd82567940b3436f45f874e0419e324b49713d8a814df04617e575ec3c6155199c4661352ea8aef63ead81ca3020f3dc4  0002-go.env-Don-t-switch-Go-toolchain-version-as-directed.patch
-d56b796ac81f8901cf426711e381b386ec6e039090fd914ebb2246e5b2ccaa6c1dcb40810a886c5e1b0a748c9bcd4cfe9749d85da91e7ce4c11aaf470295e549  0003-runtime-cleanup-M-vgetrandom-state-before-dropping-P.patch
-33ecefca77fa0af52a3b2b66a76977af27a88c8dddb89f03e0a5ae6794b9aac53a62d7be33020b49022e9a89d4cdfa383038ee10e160eb94548b2430bf3cfb5e  tests-fchmodat-not-supported.patch
-"
diff --git a/ilot/go/tests-fchmodat-not-supported.patch b/ilot/go/tests-fchmodat-not-supported.patch
deleted file mode 100644
index 168ca71..0000000
--- a/ilot/go/tests-fchmodat-not-supported.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-Without this patch, the TestFchmodat fails on our arm CI with:
-
-    syscall_linux_test.go:139: Fchmodat: unexpected error: operation not permitted, expected EOPNOTSUPP
-
-The "operation not permitted" means that EPERM was returned which
-is likely due to the security policy of our CI container.
-
-diff -upr go.orig/src/syscall/syscall_linux_test.go go/src/syscall/syscall_linux_test.go
---- go.orig/src/syscall/syscall_linux_test.go	2024-02-07 22:54:39.316022227 +0100
-+++ go/src/syscall/syscall_linux_test.go	2024-02-07 22:56:05.104871102 +0100
-@@ -135,7 +135,7 @@ func TestFchmodat(t *testing.T) {
- 	}
- 
- 	err = syscall.Fchmodat(_AT_FDCWD, "symlink1", 0444, _AT_SYMLINK_NOFOLLOW)
--	if err != syscall.EOPNOTSUPP {
-+	if !testenv.SyscallIsNotSupported(err) && err != syscall.EOPNOTSUPP {
- 		t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err)
- 	}
- }
diff --git a/ilot/listmonk/APKBUILD b/ilot/listmonk/APKBUILD
index 704084b..2d65fa6 100644
--- a/ilot/listmonk/APKBUILD
+++ b/ilot/listmonk/APKBUILD
@@ -1,7 +1,7 @@
 # Contributor: Antoine Martin (ayakael)
 # Maintainer: Antoine Martin (ayakael)
 pkgname=listmonk
-pkgver=5.0.0
+pkgver=4.0.1
 pkgrel=0
 pkgdesc='Self-hosted newsletter and mailing list manager with a modern dashboard'
 arch="all"
@@ -67,7 +67,7 @@ package() {
 	ln -s /etc/listmonk/config.toml "$pkgdir"/usr/share/webapps/listmonk/config.toml
 }
 sha512sums="
-b0875124106ac737550eb340c209f079698c0b9e1f1e55c70eca113720dbc9dcfaac63aa65722299a1448a582cedf0f9ee20b24ea0625d4e780d83e0d6bab198  listmonk-5.0.0.tar.gz
+ae5c338b756bb9d84739ab8b04b591e33ee7f6a579725083ec95e2609dbff55adbd1f2a11c0487b971aa030a3a35347cf54966820e1320b4144351935b2497d8  listmonk-4.0.1.tar.gz
 939450af4b23708e3d23a5a88fad4c24b957090bdd21351a6dd520959e52e45e5fcac117a3eafa280d9506616dae39ad3943589571f008cac5abe1ffd8062424  listmonk.sh
 8e9c0b1f335c295fb741418246eb17c7566e5e4200a284c6483433e8ddbf5250aa692435211cf062ad1dfcdce3fae9148def28f03f2492d33fe5e66cbeebd4bd  listmonk.openrc
 "
diff --git a/ilot/nextcloud30/APKBUILD b/ilot/nextcloud30/APKBUILD
deleted file mode 100644
index 161f723..0000000
--- a/ilot/nextcloud30/APKBUILD
+++ /dev/null
@@ -1,325 +0,0 @@
-# Contributor: Jakub Jirutka
-# Contributor: jahway603
-# Maintainer: 
Leonardo Arena -_pkgname=nextcloud -pkgver=30.0.10 -pkgrel=0 -is_latest=true -_pkgvermaj=${pkgver%%.*} -pkgname=nextcloud$_pkgvermaj -_replaced_ver=$(( _pkgvermaj - 1 )) -pkgdesc="A safe home for all your data" -url="https://nextcloud.com/" -arch="noarch" -license="AGPL-3.0-only" -_php=php83 -_php_mods="-bcmath -ctype -curl -dom -gd -fileinfo -gmp -iconv -intl - -mbstring -opcache -openssl -pcntl -posix -session - -simplexml -xml -xmlreader -xmlwriter -zip" -depends="ca-certificates $_php ${_php_mods//-/$_php-}" -makedepends="xmlstarlet" -$is_latest && provides="$_pkgname=$pkgver-r$pkgrel - $_pkgname-accessibility=$pkgver-r$pkgrel - $pkgname-accessibility=$pkgver-r$pkgrel - $_pkgname-bruteforcesettings=$pkgver-r$pkgrel - $pkgname-bruteforcesettings=$pkgver-r$pkgrel - $_pkgname-contactsinteraction=$pkgver-r$pkgrel - $pkgname-contactsinteraction=$pkgver-r$pkgrel - $_pkgname-cloud_federation_api=$pkgver-r$pkgrel - $pkgname-cloud_federation_api=$pkgver-r$pkgrel - $_pkgname-dav=$pkgver-r$pkgrel - $pkgname-dav=$pkgver-r$pkgrel - $_pkgname-files=$pkgver-r$pkgrel - $pkgname-files=$pkgver-r$pkgrel - $_pkgname-files_videoplayer=$pkgver-r$pkgrel - $pkgname-files_videoplayer=$pkgver-r$pkgrel - $_pkgname-federatedfilesharing=$pkgver-r$pkgrel - $pkgname-federatedfilesharing=$pkgver-r$pkgrel - $_pkgname-lookup_server_connector=$pkgver-r$pkgrel - $pkgname-lookup_server_connector=$pkgver-r$pkgrel - $_pkgname-oauth2=$pkgver-r$pkgrel - $pkgname-oauth2=$pkgver-r$pkgrel - $_pkgname-provisioning_api=$pkgver-r$pkgrel - $pkgname-provisioning_api=$pkgver-r$pkgrel - $_pkgname-related_resources=$pkgver-r$pkgrel - $pkgname-related_resources=$pkgver-r$pkgrel - $_pkgname-settings=$pkgver-r$pkgrel - $pkgname-settings=$pkgver-r$pkgrel - $_pkgname-theming=$pkgver-r$pkgrel - $pkgname-theming=$pkgver-r$pkgrel - $_pkgname-twofactor_backupcodes=$pkgver-r$pkgrel - $pkgname-twofactor_backupcodes=$pkgver-r$pkgrel - $_pkgname-twofactor_nextcloud_notification=$pkgver-r$pkgrel - $pkgname-twofactor_nextcloud_notification=$pkgver-r$pkgrel - $_pkgname-twofactor_totp=$pkgver-r$pkgrel - $pkgname-twofactor_totp=$pkgver-r$pkgrel - $_pkgname-viewer=$pkgver-r$pkgrel - $pkgname-viewer=$pkgver-r$pkgrel - $_pkgname-workflowengine=$pkgver-r$pkgrel - $pkgname-workflowengine=$pkgver-r$pkgrel - " || provides="$pkgname-accessibility=$pkgver-r$pkgrel - $pkgname-bruteforcesettings=$pkgver-r$pkgrel - $pkgname-contactsinteraction=$pkgver-r$pkgrel - $pkgname-cloud_federation_api=$pkgver-r$pkgrel - $pkgname-dav=$pkgver-r$pkgrel - $pkgname-files=$pkgver-r$pkgrel - $pkgname-files_videoplayer=$pkgver-r$pkgrel - $pkgname-federatedfilesharing=$pkgver-r$pkgrel - $pkgname-lookup_server_connector=$pkgver-r$pkgrel - $pkgname-oauth2=$pkgver-r$pkgrel - $pkgname-provisioning_api=$pkgver-r$pkgrel - $pkgname-related_resources=$pkgver-r$pkgrel - $pkgname-settings=$pkgver-r$pkgrel - $pkgname-theming=$pkgver-r$pkgrel - $pkgname-twofactor_backupcodes=$pkgver-r$pkgrel - $pkgname-twofactor_nextcloud_notification=$pkgver-r$pkgrel - $pkgname-twofactor_totp=$pkgver-r$pkgrel - $pkgname-viewer=$pkgver-r$pkgrel - $pkgname-workflowengine=$pkgver-r$pkgrel - " -replaces="nextcloud$_replaced_ver" -install="$pkgname.pre-install $pkgname.pre-upgrade $pkgname.post-upgrade $pkgname.post-install - $pkgname-initscript.post-install" -subpackages="$pkgname-doc $pkgname-initscript $pkgname-mysql $pkgname-pgsql $pkgname-sqlite - $pkgname-default-apps:_default_apps $pkgname-occ" -source="https://download.nextcloud.com/server/releases/nextcloud-$pkgver.tar.bz2 - nextcloud-dont-chmod.patch - 
dont-update-htaccess.patch - disable-integrity-check-as-default.patch - use-external-docs-if-local-not-avail.patch - - $_pkgname-config.php - $_pkgname.logrotate - $_pkgname.confd - $_pkgname.cron - $_pkgname-mysql.cnf - fpm-pool.conf - occ - " -options="!check" -pkgusers="nextcloud" -pkggroups="www-data" -builddir="$srcdir"/$_pkgname - -# List of bundled apps to separate into subpackages. Keep it in sync! -# Note: Don't add "bruteforcesettings", "contactsinteraction", -# "cloud_federation_api", "dav", "files", -# "federatedfilesharing", "lookup_server_connector", "provisioning_api", -# "oauth2", "settings", "twofactor_backupcodes", "twofactor_totp", -# "twofactor_nextcloud_notification", "theming", "viewer", -# "workflowengine", "related_resources" -# here, these should be always installed. -_apps="activity - admin_audit - circles - comments - dashboard - encryption - federation - files_downloadlimit - files_external - files_pdfviewer - files_reminders - files_sharing - files_trashbin - files_versions - firstrunwizard - logreader - nextcloud_announcements - notifications - password_policy - photos - privacy - recommendations - serverinfo - support - sharebymail - survey_client - suspicious_login - systemtags - text - user_ldap - user_status - weather_status - webhook_listeners - " -for _i in $_apps; do - subpackages="$subpackages $pkgname-$_i:_package_app" -done - -# Directory for apps shipped with Nextcloud. -_appsdir="usr/share/webapps/$_pkgname/apps" - -package() { - local basedir="var/lib/$_pkgname" - local datadir="$basedir/data" - local wwwdir="usr/share/webapps/$_pkgname" - local confdir="etc/$_pkgname" - - mkdir -p "$pkgdir" - cd "$pkgdir" - - mkdir -p ./${wwwdir%/*} - cp -a "$builddir" ./$wwwdir - - chmod +x ./$wwwdir/occ - chmod 664 ./$wwwdir/.htaccess \ - ./$wwwdir/.user.ini - - # Let's not ship upstream's 'updatenotification' app and updater, which - # has zero chance of working and a big chance of blowing things up. - rm -r ./$wwwdir/apps/updatenotification \ - ./$wwwdir/lib/private/Updater/VersionCheck.php - - # Replace bundled CA bundle with ours. - ln -sf /etc/ssl/certs/ca-certificates.crt ./$wwwdir/resources/config/ca-bundle.crt - - install -d -m 770 -o nextcloud -g www-data \ - ./$confdir ./$datadir ./$basedir/apps - install -d -m 775 -o nextcloud -g www-data \ - ./var/log/$_pkgname - - # Create symlink from web root to site-apps, so web server can find - # assets w/o explicit configuration for this layout. - ln -s /$basedir/apps ./$wwwdir/apps-appstore - - mv ./$wwwdir/config/* ./$confdir/ - rm -r ./$wwwdir/config - ln -s /$confdir ./$wwwdir/config - - mkdir -p ./usr/share/doc/$pkgname - mv ./$wwwdir/core/doc ./usr/share/doc/$pkgname/core - - install -m 660 -o nextcloud -g www-data \ - "$srcdir"/$_pkgname-config.php ./$confdir/config.php - - install -m 644 -D "$srcdir"/$_pkgname.logrotate ./etc/logrotate.d/$_pkgname - - install -m 755 -D "$srcdir"/occ ./usr/bin/occ - - # Clean some unnecessary files. - find . -name .gitignore -delete \ - -o -name .bower.json -delete \ - -o -name 'README*' -delete \ - -o -name 'CHANGELOG*' -delete \ - -o -name 'CONTRIBUTING*' -delete - find . 
-name .github -type d -prune -exec rm -r {} \;
-}
-
-doc() {
-	replaces="nextcloud$_replaced_ver-doc"
-	$is_latest && provides="$_pkgname-doc=$pkgver-r$pkgrel"
-	default_doc
-
-	local target="$subpkgdir"/usr/share/webapps/$_pkgname/core/doc
-	mkdir -p "${target%/*}"
-	ln -s ../../../doc/$pkgname/core "$target"
-	install -m644 README.alpine "$subpkgdir"/usr/share/webapps/$_pkgname/README.alpine
-}
-
-initscript() {
-	pkgdesc="Init script that runs Nextcloud with php-fpm"
-	depends="$pkgname $_php-fpm"
-	replaces="nextcloud$_replaced_ver-initscript"
-	$is_latest && provides="$_pkgname-initscript=$pkgver-r$pkgrel"
-
-	local confdir="$subpkgdir/etc/$_php/php-fpm.d"
-	local fpm_name="php-fpm${_php#php}"
-
-	install -m 644 -D "$srcdir"/fpm-pool.conf "$confdir"/$_pkgname.conf
-	install -m 644 -D "$srcdir"/$_pkgname.confd "$subpkgdir"/etc/conf.d/$_pkgname
-	install -m 755 -D "$srcdir"/$_pkgname.cron "$subpkgdir"/etc/periodic/15min/$_pkgname
-
-	mkdir -p "$subpkgdir"/etc/init.d
-	ln -s $fpm_name "$subpkgdir"/etc/init.d/$_pkgname
-}
-
-pgsql() {
-	pkgdesc="Nextcloud PostgreSQL support"
-	depends="$pkgname $_php-pgsql $_php-pdo_pgsql"
-	replaces="nextcloud$_replaced_ver-pgsql"
-	$is_latest && provides="$_pkgname-pgsql=$pkgver-r$pkgrel"
-	mkdir -p "$subpkgdir"
-}
-
-sqlite() {
-	pkgdesc="Nextcloud SQLite support"
-	depends="$pkgname $_php-sqlite3 $_php-pdo_sqlite"
-	replaces="nextcloud$_replaced_ver-sqlite"
-	$is_latest && provides="$_pkgname-sqlite=$pkgver-r$pkgrel"
-	mkdir -p "$subpkgdir"
-}
-
-mysql() {
-	pkgdesc="Nextcloud MySQL support"
-	depends="$pkgname $_php-pdo_mysql"
-	replaces="nextcloud$_replaced_ver-mysql"
-	$is_latest && provides="$_pkgname-mysql=$pkgver-r$pkgrel"
-
-	mkdir -p "$subpkgdir"
-	install -m 644 -D "$srcdir"/$_pkgname-mysql.cnf "$subpkgdir"/etc/my.cnf.d/$_pkgname.cnf
-}
-
-occ() {
-	pkgdesc="Nextcloud OCC cmd"
-	replaces="nextcloud$_replaced_ver-occ"
-	$is_latest && provides="$_pkgname-occ=$pkgver-r$pkgrel"
-	mkdir -p "$subpkgdir/usr/share/webapps/$_pkgname"
-	amove "usr/share/webapps/$_pkgname/occ"
-	amove "usr/bin/occ"
-}
-
-_default_apps() {
-	pkgdesc="Nextcloud default apps"
-	depends="$pkgname"
-	replaces="nextcloud$_replaced_ver-default-apps"
-	$is_latest && provides="$_pkgname-default-apps=$pkgver-r$pkgrel"
-
-	local path; for path in "$pkgdir"/"$_appsdir"/*; do
-		if grep -q '<default_enable/>' "$path"/appinfo/info.xml; then
-			depends="$depends $pkgname-${path##*/}"
-		fi
-	done
-
-	mkdir -p "$subpkgdir"
-}
-
-_package_app() {
-	local appname="${subpkgname#"$pkgname"-}"
-	local appinfo="$pkgdir/$_appsdir/$appname/appinfo/info.xml"
-
-	local name=$(xmlstarlet sel -t -v 'info/name/text()' "$appinfo")
-	pkgdesc="Nextcloud ${name:-$appname} app"
-	replaces="nextcloud$_replaced_ver-$appname"
-	$is_latest && provides="$_pkgname-$appname=$pkgver-r$pkgrel"
-
-	local php_deps=$(xmlstarlet sel -t -v 'info/dependencies/lib/text()' "$appinfo" \
-		| xargs -r -n1 printf "$_php-%s\n")
-	local app_deps=""
-
-	case "$appname" in
-		files_sharing) app_deps="-federatedfilesharing"
-			;;
-		serverinfo) app_deps="-files_sharing"
-	esac
-
-	depends="$pkgname $php_deps ${app_deps//-/$pkgname-}"
-
-	mkdir -p "$subpkgdir"/$_appsdir
-	mv "$pkgdir"/$_appsdir/$appname "$subpkgdir"/$_appsdir/
-}
-
-sha512sums="
-c8c9800fff46c5634576b9e0696afd4083e34d24000762ebf3a66192d1dea3f664d1c1d42e6ae262535757991d0a60ee7ee1e1d24757677be56bb8ea7d4d3fd5  nextcloud-30.0.10.tar.bz2
-daeabeaa315bb908cc1e49612cce4b2debd71d17acb84b5d14e15fe124c907884b72d54e9aa669ec209eee1b1934d0bc242d72a28d8db7339cfb08383f66fd5c  nextcloud-dont-chmod.patch
-12f4a39aef0f81a0115c81bf2b345cc194537a7e8300748b800b0e35bc07928091296074b23c2019c17aced69854a11d1ed7225f67eefd27cf00c3969a75c5b0 dont-update-htaccess.patch -cb04252d01407c7030e87dd54616c621ea0f85ef0212674b1161288182538cae0fb31c67e7cc07c66f9607075774c64e386009cc66365b1f1b155f6ad4f83ac0 disable-integrity-check-as-default.patch -c0a9b7c31c8beaca711f8e97d98441007b3dca7fb3d316d2eacd28a73b5233def6f846c02d98202f75efb9cb248b8787a80e20b07c32d1c7534a0e54bb20feab use-external-docs-if-local-not-avail.patch -5f73cd9399fa484ef15bd47e803c93381deffbc7699eceadbb5c27e43b20156806d74e5021a64d28f0165ef87b519e962780651711a37bceb9f0b04455dfdce1 nextcloud-config.php -7388458a9e8b7afd3d3269718306410ffa59c3c23da4bef367a4d7f6d2570136fae9dd421b19c1441e7ffb15a5405e18bb5da67b1a15f9f45e8b98d3fda532ba nextcloud.logrotate -dcc57735d7d4af4a7ebbdd1186d301e51d2ae4675022aea6bf1111222dfa188a3a490ebd6e7c8a7ac30046cb7d93f81cec72a51acbc60d0c10b7fb64630c637a nextcloud.confd -06a62deae219d09df7acbf38ccb1dcac691dd882459ef70243b5583d7ed21d1ea81dbf0751b4e7199c0de9878755a3882e139d9ccc280bf2e90cbe33fb565487 nextcloud.cron -b9ad5434c384c96608f00d65c45f782e279c6756da8fb706f62ecaf7b7aa420077cb6989da5e85becc47418884ec0672e7db874174454ca679fdca84a50f537f nextcloud-mysql.cnf -78ef204ee7c12b228c0b7b04333514e561c1c8e19153f5507224fa4fdd112aaaa6331747014f3b72181298f52ecd4223bcff4bd963b49b49153265254b07e79b fpm-pool.conf -be54ad9308c8250ff3aef3514b10b228487fc2fbdefa1d28dbbb18a4770f7d9fda90e80c722de8e3c25ce752d124ff79314f16f783b1e5ad67df4e1fe6e880f9 occ -" diff --git a/ilot/nextcloud30/README.alpine b/ilot/nextcloud30/README.alpine deleted file mode 100644 index 229ab60..0000000 --- a/ilot/nextcloud30/README.alpine +++ /dev/null @@ -1,5 +0,0 @@ -## nextcloud-serverinfo package - -If you are using the provided nextcloud php-fpm configuration, -nextcloud-serverinfo package requires to enable 'shell_exec' function -in php configuration file 'nextcloud.conf'. diff --git a/ilot/nextcloud30/disable-integrity-check-as-default.patch b/ilot/nextcloud30/disable-integrity-check-as-default.patch deleted file mode 100644 index f6a3a11..0000000 --- a/ilot/nextcloud30/disable-integrity-check-as-default.patch +++ /dev/null @@ -1,23 +0,0 @@ -We patch some files and Nextcloud's integrity check doesn't like it... -APK ensures integrity of all installed files, so this Nextcloud's integrity -check doesn't add any value. ---- - lib/private/IntegrityCheck/Checker.php | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/private/IntegrityCheck/Checker.php b/lib/private/IntegrityCheck/Checker.php -index e8fd087e..cfbaeb7d 100644 ---- a/lib/private/IntegrityCheck/Checker.php -+++ b/lib/private/IntegrityCheck/Checker.php -@@ -91,7 +91,7 @@ class Checker { - * applicable for very specific scenarios and we should not advertise it - * too prominent. So please do not add it to config.sample.php. - */ -- return !($this->config?->getSystemValueBool('integrity.check.disabled', false) ?? false); -+ return !($this->config?->getSystemValueBool('integrity.check.disabled', true) ?? true); - } - - /** --- -2.44.0 - diff --git a/ilot/nextcloud30/dont-update-htaccess.patch b/ilot/nextcloud30/dont-update-htaccess.patch deleted file mode 100644 index 87ec6a1..0000000 --- a/ilot/nextcloud30/dont-update-htaccess.patch +++ /dev/null @@ -1,42 +0,0 @@ -Don't mess with .htaccess files. 
- -Patch ported from https://src.fedoraproject.org/cgit/rpms/nextcloud.git/tree/nextcloud-9.1.0-dont_update_htacess.patch ---- - core/register_command.php | 1 - - lib/private/Updater.php | 8 -------- - 2 files changed, 9 deletions(-) - -diff --git a/core/register_command.php b/core/register_command.php -index 4a84e551..a5158dc4 100644 ---- a/core/register_command.php -+++ b/core/register_command.php -@@ -136,7 +136,6 @@ if ($config->getSystemValueBool('installed', false)) { - $application->add(Server::get(Command\Maintenance\Mimetype\UpdateDB::class)); - $application->add(Server::get(Command\Maintenance\Mimetype\UpdateJS::class)); - $application->add(Server::get(Command\Maintenance\Mode::class)); -- $application->add(Server::get(Command\Maintenance\UpdateHtaccess::class)); - $application->add(Server::get(Command\Maintenance\UpdateTheme::class)); - - $application->add(Server::get(Command\Upgrade::class)); -diff --git a/lib/private/Updater.php b/lib/private/Updater.php -index 09866273..59144308 100644 ---- a/lib/private/Updater.php -+++ b/lib/private/Updater.php -@@ -230,14 +230,6 @@ class Updater extends BasicEmitter { - throw new \Exception('Updates between multiple major versions and downgrades are unsupported.'); - } - -- // Update .htaccess files -- try { -- Setup::updateHtaccess(); -- Setup::protectDataDirectory(); -- } catch (\Exception $e) { -- throw new \Exception($e->getMessage()); -- } -- - // create empty file in data dir, so we can later find - // out that this is indeed an ownCloud data directory - // (in case it didn't exist before) --- -2.44.0 - diff --git a/ilot/nextcloud30/fpm-pool.conf b/ilot/nextcloud30/fpm-pool.conf deleted file mode 100644 index cae9acc..0000000 --- a/ilot/nextcloud30/fpm-pool.conf +++ /dev/null @@ -1,200 +0,0 @@ -[global] -; Error log file -; Default Value: log/php-fpm.log -error_log = /var/log/nextcloud/php-fpm.log - -; Log level -; Possible Values: alert, error, warning, notice, debug -; Default Value: notice -log_level = warning - -; If this number of child processes exit with SIGSEGV or SIGBUS within the time -; interval set by emergency_restart_interval then FPM will restart. A value -; of '0' means 'Off'. -; Default Value: 0 -emergency_restart_threshold = 10 - -; Interval of time used by emergency_restart_interval to determine when -; a graceful restart will be initiated. This can be useful to work around -; accidental corruptions in an accelerator's shared memory. -; Available Units: s(econds), m(inutes), h(ours), or d(ays) -; Default Unit: seconds -; Default Value: 0 -emergency_restart_interval = 1m - -; Time limit for child processes to wait for a reaction on signals from master. -; Available units: s(econds), m(inutes), h(ours), or d(ays) -; Default Unit: seconds -; Default Value: 0 -process_control_timeout = 10s - - -[nextcloud] -user = nextcloud -group = www-data - -; The address on which to accept FastCGI requests. -; Valid syntaxes are: -; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific address on -; a specific port; -; 'port' - to listen on a TCP socket to all addresses on a -; specific port; -; '/path/to/unix/socket' - to listen on a unix socket (the path is *not* -; relative to chroot!) -; Note: This value is mandatory. -listen = /run/nextcloud/fastcgi.sock - -; Set permissions for unix socket, if one is used. In Linux, read/write -; permissions must be set in order to allow connections from a web server. Many -; BSD-derived systems allow connections regardless of permissions. 
-; Default Values: user and group are set as the running user -; mode is set to 0666 -listen.mode = 0660 - -; Choose how the process manager will control the number of child processes. -; Possible Values: -; static ... a fixed number of child processes. -; dynamic ... the number of child processes are set dynamically. -; ondemand ... no children are created at startup; children will be forked -; when new requests will connect. -; Note: This value is mandatory. -pm = ondemand - -; The number of child processes to be created when pm is set to 'static' and the -; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'. -; This value sets the limit on the number of simultaneous requests that will be -; served. -; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand' -; Note: This value is mandatory. -pm.max_children = 10 - -; The number of seconds after which an idle process will be killed. -; Note: Used only when pm is set to 'ondemand' -; Default Value: 10s -pm.process_idle_timeout = 120s - -; The number of requests each child process should execute before respawning. -; This can be useful to work around memory leaks in 3rd party libraries. For -; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS. -; Default Value: 0 -pm.max_requests = 500 - -; The URI to view the FPM status page. If this value is not set, no URI will be -; recognized as a status page. -; Note: The value must start with a leading slash (/). The value can be -; anything, but it may not be a good idea to use the .php extension or it -; may conflict with a real PHP file. -; Default Value: not set -pm.status_path = - -; The ping URI to call the monitoring page of FPM. If this value is not set, no -; URI will be recognized as a ping page. This could be used to test from outside -; that FPM is alive and responding, or to -; - create a graph of FPM availability (rrd or such); -; - remove a server from a group if it is not responding (load balancing); -; - trigger alerts for the operating team (24/7). -; Note: The value must start with a leading slash (/). The value can be -; anything, but it may not be a good idea to use the .php extension or it -; may conflict with a real PHP file. -; Default Value: not set -ping.path = /ping - -; The timeout for serving a single request after which the worker process will -; be killed. This option should be used when the 'max_execution_time' ini option -; does not stop script execution for some reason. A value of '0' means 'off'. -; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) -; Default Value: 0 -;request_terminate_timeout = 0 - -; The timeout for serving a single request after which a PHP backtrace will be -; dumped to the 'slowlog' file. A value of '0s' means 'off'. -; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) -; Default Value: 0 -;request_slowlog_timeout = 0 - -; The log file for slow requests -; Default Value: not set -; Note: slowlog is mandatory if request_slowlog_timeout is set -; Note: the path is *not* relative to chroot. -;slowlog = /var/log/nextcloud/php-fpm.slow.log - -; Redirect worker stdout and stderr into main error log. If not set, stdout and -; stderr will be redirected to /dev/null according to FastCGI specs. -; Note: on highloaded environement, this can cause some delay in the page -; process time (several ms). -; Default Value: no -;catch_workers_output = yes - -; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from -; the current environment. 
-; Default Value: clean env
-env[PATH] = /usr/local/bin:/usr/bin:/bin
-env[TMP] = /tmp
-env[TMPDIR] = /tmp
-env[TEMP] = /tmp
-
-; Additional php.ini defines, specific to this pool of workers. These settings
-; overwrite the values previously defined in the php.ini. The directives are the
-; same as the PHP SAPI:
-;   php_value/php_flag             - you can set classic ini defines which can
-;                                    be overwritten from PHP call 'ini_set'.
-;   php_admin_value/php_admin_flag - these directives won't be overwritten by
-;                                    PHP call 'ini_set'
-; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
-;
-; Defining 'extension' will load the corresponding shared extension from
-; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
-; overwrite previously defined php.ini values, but will append the new value
-; instead.
-;
-; Note: path INI options can be relative and will be expanded with the prefix
-; (pool, global or /usr/lib/php7.x)
-
-; Allow HTTP file uploads.
-php_admin_flag[file_uploads] = true
-
-; Maximal size of a file that can be uploaded via web interface.
-php_admin_value[memory_limit] = 512M
-php_admin_value[post_max_size] = 513M
-php_admin_value[upload_max_filesize] = 513M
-
-; Where to store temporary files.
-php_admin_value[session.save_path] = /var/tmp/nextcloud
-php_admin_value[sys_temp_dir] = /var/tmp/nextcloud
-php_admin_value[upload_tmp_dir] = /var/tmp/nextcloud
-
-; Log errors to specified file.
-php_admin_flag[log_errors] = on
-php_admin_value[error_log] = /var/log/nextcloud/php.error.log
-
-; OPcache error_log file name. Empty string assumes "stderr"
-php_admin_value[opcache.error_log] = /var/log/nextcloud/php.error.log
-
-; Output buffering is a mechanism for controlling how much output data
-; (excluding headers and cookies) PHP should keep internally before pushing that
-; data to the client. If your application's output exceeds this setting, PHP
-; will send that data in chunks of roughly the size you specify.
-; This must be disabled for ownCloud.
-php_admin_flag[output_buffering] = false
-
-; Overload(replace) single byte functions by mbstring functions.
-; This must be disabled for ownCloud.
-php_admin_flag[mbstring.func_overload] = false
-
-; Never populate the $HTTP_RAW_POST_DATA variable.
-; http://php.net/always-populate-raw-post-data
-php_admin_value[always_populate_raw_post_data] = -1
-
-; Disable certain functions for security reasons.
-; http://php.net/disable-functions
-php_admin_value[disable_functions] = exec,passthru,shell_exec,system,proc_open,curl_multi_exec,show_source
-
-; Set recommended settings for OpCache.
-; https://docs.nextcloud.com/server/13/admin_manual/configuration_server/server_tuning.html#enable-php-opcache
-php_admin_flag[opcache.enable] = true
-php_admin_flag[opcache.enable_cli] = true
-php_admin_flag[opcache.save_comments] = true
-php_admin_value[opcache.interned_strings_buffer] = 8
-php_admin_value[opcache.max_accelerated_files] = 10000
-php_admin_value[opcache.memory_consumption] = 128
-php_admin_value[opcache.revalidate_freq] = 1
diff --git a/ilot/nextcloud30/nextcloud-config.php b/ilot/nextcloud30/nextcloud-config.php
deleted file mode 100644
index 035fb79..0000000
--- a/ilot/nextcloud30/nextcloud-config.php
+++ /dev/null
@@ -1,37 +0,0 @@
-<?php
-$CONFIG = array(
-	'datadirectory' => '/var/lib/nextcloud/data',
-	'logfile' => '/var/log/nextcloud/nextcloud.log',
-	'apps_paths' => array (
-	// Read-only location for apps shipped with Nextcloud and installed by apk.
- 0 => array ( - 'path' => '/usr/share/webapps/nextcloud/apps', - 'url' => '/apps', - 'writable' => false, - ), - // Writable location for apps installed from AppStore. - 1 => array ( - 'path' => '/var/lib/nextcloud/apps', - 'url' => '/apps-appstore', - 'writable' => true, - ), - ), - 'updatechecker' => false, - 'check_for_working_htaccess' => false, - - // Uncomment to enable Zend OPcache. - //'memcache.local' => '\OC\Memcache\APCu', - - // Uncomment this and add user nextcloud to the redis group to enable Redis - // cache for file locking. This is highly recommended, see - // https://github.com/nextcloud/server/issues/9305. - //'memcache.locking' => '\OC\Memcache\Redis', - //'redis' => array( - // 'host' => '/run/redis/redis.sock', - // 'port' => 0, - // 'dbindex' => 0, - // 'timeout' => 1.5, - //), - - 'installed' => false, -); diff --git a/ilot/nextcloud30/nextcloud-dont-chmod.patch b/ilot/nextcloud30/nextcloud-dont-chmod.patch deleted file mode 100644 index 1929185..0000000 --- a/ilot/nextcloud30/nextcloud-dont-chmod.patch +++ /dev/null @@ -1,46 +0,0 @@ -commit d8f09abd65e5fd620b8b0d720daee293c355660c -Author: Leonardo Arena -Date: Mon Aug 31 06:59:15 2020 +0000 - - Don't chmod. The package takes care of setting the right permissions for directories and files - -diff --git a/lib/private/Config.php b/lib/private/Config.php -index cbdbc5b2..1118981b 100644 ---- a/lib/private/Config.php -+++ b/lib/private/Config.php -@@ -242,9 +242,6 @@ class Config { - touch($this->configFilePath); - $filePointer = fopen($this->configFilePath, 'r+'); - -- // Prevent others not to read the config -- chmod($this->configFilePath, 0640); -- - // File does not exist, this can happen when doing a fresh install - if (!is_resource($filePointer)) { - throw new HintException( -diff --git a/lib/private/Log/File.php b/lib/private/Log/File.php -index 9e9abb11..7db25286 100644 ---- a/lib/private/Log/File.php -+++ b/lib/private/Log/File.php -@@ -82,9 +82,6 @@ class File extends LogDetails implements IWriter, IFileBased { - public function write(string $app, $message, int $level) { - $entry = $this->logDetailsAsJSON($app, $message, $level); - $handle = @fopen($this->logFile, 'a'); -- if ($this->logFileMode > 0 && is_file($this->logFile) && (fileperms($this->logFile) & 0777) != $this->logFileMode) { -- @chmod($this->logFile, $this->logFileMode); -- } - if ($handle) { - fwrite($handle, $entry."\n"); - fclose($handle); -diff --git a/lib/private/legacy/OC_Util.php b/lib/private/legacy/OC_Util.php -index 71f6edba..216abdf8 100644 ---- a/lib/private/legacy/OC_Util.php -+++ b/lib/private/legacy/OC_Util.php -@@ -1004,7 +1004,6 @@ class OC_Util { - . 
' cannot be listed by other users.'); - $perms = substr(decoct(@fileperms($dataDirectory)), -3); - if (substr($perms, -1) !== '0') { -- chmod($dataDirectory, 0770); - clearstatcache(); - $perms = substr(decoct(@fileperms($dataDirectory)), -3); - if ($perms[2] !== '0') { diff --git a/ilot/nextcloud30/nextcloud-mysql.cnf b/ilot/nextcloud30/nextcloud-mysql.cnf deleted file mode 100644 index ff90630..0000000 --- a/ilot/nextcloud30/nextcloud-mysql.cnf +++ /dev/null @@ -1,3 +0,0 @@ -[server] -# See https://github.com/nextcloud/server/issues/25436 -innodb_read_only_compressed=off diff --git a/ilot/nextcloud30/nextcloud.confd b/ilot/nextcloud30/nextcloud.confd deleted file mode 100644 index b24f26d..0000000 --- a/ilot/nextcloud30/nextcloud.confd +++ /dev/null @@ -1,8 +0,0 @@ -# Config file for /etc/init.d/nextcloud - -name="Nextcloud" -user="nextcloud" -group="www-data" - -# Uncomment if you use Nextcloud with Redis for caching. -#rc_need="redis" diff --git a/ilot/nextcloud30/nextcloud.cron b/ilot/nextcloud30/nextcloud.cron deleted file mode 100644 index 398cb11..0000000 --- a/ilot/nextcloud30/nextcloud.cron +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# Run only when nextcloud service is started. -if rc-service nextcloud -q status >/dev/null 2>&1; then - su nextcloud -s /bin/sh -c 'php83 -f /usr/share/webapps/nextcloud/cron.php' -fi diff --git a/ilot/nextcloud30/nextcloud.logrotate b/ilot/nextcloud30/nextcloud.logrotate deleted file mode 100644 index 19e17fd..0000000 --- a/ilot/nextcloud30/nextcloud.logrotate +++ /dev/null @@ -1,6 +0,0 @@ -/var/log/nextcloud/*.log { - daily - compress - copytruncate - su nextcloud www-data -} diff --git a/ilot/nextcloud30/nextcloud30-initscript.post-install b/ilot/nextcloud30/nextcloud30-initscript.post-install deleted file mode 100644 index 48f1351..0000000 --- a/ilot/nextcloud30/nextcloud30-initscript.post-install +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# It's not needed to be writable for www-data group when running with php-fpm. -for dir in /etc/nextcloud \ - /etc/nextcloud/config.php \ - /var/lib/nextcloud/data \ - /var/lib/nextcloud/apps -do - chmod g-w $dir -done -chgrp root /etc/nextcloud/config.php - -# This must be writable (only) by nextcloud user. -chmod 750 /var/log/nextcloud - -mkdir /var/tmp/nextcloud # If /var/tmp doesn't exist there's a big problem -chown nextcloud /var/tmp/nextcloud -chmod 700 /var/tmp/nextcloud - -cat <&2 -echo ' please do the following:' >&2 -echo -echo ' * Run "apk upgrade -a" a second time to complete the upgrade of all' >&2 -echo ' nextcloud packages' >&2 -echo ' * Run "occ upgrade" to finish upgrading your Nextcloud instance' >&2 -echo ' * NOTE: since v29.0.4-r1 "occ" command is now in package "nextcloudNN-occ"' >&2 -echo diff --git a/ilot/nextcloud30/nextcloud30.post-upgrade b/ilot/nextcloud30/nextcloud30.post-upgrade deleted file mode 100644 index be7e267..0000000 --- a/ilot/nextcloud30/nextcloud30.post-upgrade +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh - -ver_new="$1" -ver_old="$2" - -if [ $(apk version -t "$ver_old" '12.0.0-r2') = '<' ]; then - cat >&2 <<-EOF - * - * All Nextcloud's bundled apps (except "files" and "dav") have been moved to - * separate subpackages (e.g. nextcloud-activity). If you want to install - * all apps that are enabled by default at once, run: - * - * apk add nextcloud-default-apps - * - EOF - - if [ "$(ls -A /var/lib/nextcloud/apps)" ]; then - cat >&2 <<-EOF - * - * Nextcloud's bundled apps have been moved from /var/lib/nextcloud/apps - * to /usr/share/webapps/nextcloud/apps. 
Only apps installed from App Store - * should be stored in /var/lib/nextcloud/apps. - * - * It seems that you have installed some apps from App Store, so you have to - * add /var/lib/nextcloud/apps to your apps_paths. Copy "apps_paths" key - * from /etc/nextcloud/config.php.apk-new to your config.php. - * - EOF - fi -fi - -if [ $(apk version -t "$ver_old" '15.0.2-r0') = '<' ]; then - cat >&2 <<-EOF - * - * App "user_external" is no longer available via release channel. - * You need to uninstall the package and install it via appstore: - * - * apk del nextcloud-user_external - * - EOF - -fi - -if [ "${ver_new%-r*}" != "${ver_old%-r*}" ]; then - echo ' * Run "occ upgrade" to finish upgrading your NextCloud instance!' >&2 - echo ' * NOTE: since v29.0.4-r1 "occ" command is now in package "nextcloudNN-occ"' >&2 -fi diff --git a/ilot/nextcloud30/nextcloud30.pre-install b/ilot/nextcloud30/nextcloud30.pre-install deleted file mode 100644 index e9cf539..0000000 --- a/ilot/nextcloud30/nextcloud30.pre-install +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -addgroup -S -g 82 www-data 2>/dev/null -adduser -S -D -H -h /var/lib/nextcloud -s /sbin/nologin -G www-data -g Nextcloud nextcloud 2>/dev/null - -exit 0 diff --git a/ilot/nextcloud30/nextcloud30.pre-upgrade b/ilot/nextcloud30/nextcloud30.pre-upgrade deleted file mode 100644 index f444e78..0000000 --- a/ilot/nextcloud30/nextcloud30.pre-upgrade +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -ver_old="$2" -apps_link='/usr/share/webapps/nextcloud/apps' - -# Remove apps symlink before replacing files to avoid losing installed apps. -# This is a workaround for some issue in apk. -if [ $(apk version -t "$ver_old" '12.0.0-r2') = '<' ] && [ -L "$apps_link" ]; then - rm "$apps_link" -fi diff --git a/ilot/nextcloud30/occ b/ilot/nextcloud30/occ deleted file mode 100644 index 57b8ef3..0000000 --- a/ilot/nextcloud30/occ +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -NEXTCLOUD_DIR='/usr/share/webapps/nextcloud' -: ${NEXTCLOUD_USER:="nextcloud"} - -if [ "$(id -un)" != "$NEXTCLOUD_USER" ]; then - exec su -s /bin/sh "$NEXTCLOUD_USER" -c '$0 "$@"' -- php83 $NEXTCLOUD_DIR/occ "$@" -else - exec php83 $NEXTCLOUD_DIR/occ "$@" -fi diff --git a/ilot/nextcloud30/use-external-docs-if-local-not-avail.patch b/ilot/nextcloud30/use-external-docs-if-local-not-avail.patch deleted file mode 100644 index 42af94b..0000000 --- a/ilot/nextcloud30/use-external-docs-if-local-not-avail.patch +++ /dev/null @@ -1,36 +0,0 @@ -From f17c14956c51206ad82acc5d9b66fd752f0e3c03 Mon Sep 17 00:00:00 2001 -From: Jakub Jirutka -Date: Tue, 19 Dec 2023 07:53:40 +0000 -Subject: [PATCH] use external docs if local not available - ---- - apps/settings/templates/help.php | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -diff --git a/apps/settings/templates/help.php b/apps/settings/templates/help.php -index 649178c1..29b5ac4c 100644 ---- a/apps/settings/templates/help.php -+++ b/apps/settings/templates/help.php -@@ -48,8 +48,19 @@ - - -
[hunk body lost in extraction: the eleven added template lines keep the existing help iframe when local documentation is present and otherwise render a notice reading "Local documentation is not installed", followed by "Please use" and an external link labelled via t('online documentation') with a trailing ↗ pointing at the online documentation URL]
-- 
-2.42.0
-
diff --git a/ilot/py3-azure-core/APKBUILD b/ilot/py3-azure-core/APKBUILD
index 6e76144..8befd33 100644
--- a/ilot/py3-azure-core/APKBUILD
+++ b/ilot/py3-azure-core/APKBUILD
@@ -3,7 +3,7 @@
 pkgname=py3-azure-core
 #_pkgreal is used by apkbuild-pypi to find modules at PyPI
 _pkgreal=azure-core
-pkgver=1.32.0
+pkgver=1.31.0
 pkgrel=0
 pkgdesc="Microsoft Azure Core Library for Python"
 url="https://pypi.python.org/project/microsoft-kiota-authentication-azure"
@@ -35,5 +35,5 @@ package() {
 }
 
 sha512sums="
-d258a2ca3bc2c9514dec91bf2dbb19c0ee4c0c0bec73a4301b47fb43be768be836f32621b70a8cdb0e39f1491a522191a82a00f318ee7c901e8861a62439e934  py3-azure-core-1.32.0.tar.gz
+be2fc27610034ee5c345ed11f59233ec81d8ad628c4b732531a24e0d54720b81f22d855e5a4d9214c6a8234e479da059b37a40c7ad15e738e2dd46fb4755dad6  py3-azure-core-1.31.0.tar.gz
 "
diff --git a/ilot/py3-azure-identity/APKBUILD b/ilot/py3-azure-identity/APKBUILD
index 9341e11..e448e38 100644
--- a/ilot/py3-azure-identity/APKBUILD
+++ b/ilot/py3-azure-identity/APKBUILD
@@ -3,7 +3,8 @@
 pkgname=py3-azure-identity
 #_pkgreal is used by apkbuild-pypi to find modules at PyPI
 _pkgreal=azure-identity
-pkgver=1.19.0
+pkgver=1.18.0
+_pkgver=${pkgver}b2
 pkgrel=0
 pkgdesc="Microsoft Azure Identity Library for Python"
 url="https://pypi.org/project/azure-identity/"
@@ -18,8 +19,8 @@ depends="
 checkdepends="py3-pytest"
 makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit"
 options="!check" #todo
-source="$pkgname-$pkgver.tar.gz::https://github.com/Azure/azure-sdk-for-python/archive/refs/tags/azure-identity_$pkgver.tar.gz"
-builddir="$srcdir"/azure-sdk-for-python-azure-identity_$pkgver/sdk/identity/azure-identity
+source="$pkgname-$pkgver.tar.gz::https://github.com/Azure/azure-sdk-for-python/archive/refs/tags/azure-identity_$_pkgver.tar.gz"
+builddir="$srcdir"/azure-sdk-for-python-azure-identity_$_pkgver/sdk/identity/azure-identity
 subpackages="$pkgname-pyc"
 
 build() {
@@ -40,5 +41,5 @@ package() {
 }
 
 sha512sums="
-090aed812a7a72c649ded2574dc0a05dd7d9db41675e3d86921ab0555f8af7c83999cb879a2f2e0984880874b3b6dfead6b8de0563d8a99d81775715640a9e01  py3-azure-identity-1.19.0.tar.gz
+84defc19db3aea614b13dbf2d24ee3ea13c210a05460a4ae2968b01d34f136c81eb9d77b7ce1f0c4590e6f36af0b6fe114787fc7897ffa0f2d8093a9bcb48bf4  py3-azure-identity-1.18.0.tar.gz
 "
diff --git a/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch
new file mode 100644
index 0000000..b2999d2
--- /dev/null
+++ b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch
@@ -0,0 +1,3823 @@
+From 07e14a3442d080bd4e873dc74e441296b8291ae2 Mon Sep 17 00:00:00 2001
+From: Marc 'risson' Schmitt
+Date: Thu, 16 Nov 2023 13:26:16 +0100
+Subject: [PATCH 1/3] clone: update from pg-clone-schema
+
+Signed-off-by: Marc 'risson' Schmitt
+---
+ django_tenants/clone.py | 3407 ++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 2977 insertions(+), 430 deletions(-)
+
+diff --git a/django_tenants/clone.py b/django_tenants/clone.py
+index 426e81b8..3afce109 100644
+--- a/django_tenants/clone.py
++++ b/django_tenants/clone.py
+@@ -6,24 +6,592 @@
+ from django_tenants.utils import schema_exists
+ 
+ CLONE_SCHEMA_FUNCTION = r"""
+--- https://github.com/denishpatel/pg-clone-schema/ rev 0d3b522
++-- https://github.com/denishpatel/pg-clone-schema/ rev 073922e
+ -- https://github.com/tomturner/django-tenants/issues/322
+ 
+--- Function: clone_schema(text, text, boolean, boolean)
++do $$
++<<first_block>>
++DECLARE
++  cnt int;
++BEGIN
++  DROP TYPE IF EXISTS public.cloneparms
CASCADE; ++ CREATE TYPE public.cloneparms AS ENUM ('DATA', 'NODATA','DDLONLY','NOOWNER','NOACL','VERBOSE','DEBUG','FILECOPY'); ++ -- END IF; ++end first_block $$; ++ ++ ++-- select * from public.get_insert_stmt_ddl('clone1','sample','address'); ++CREATE OR REPLACE FUNCTION public.get_insert_stmt_ddl( ++ source_schema text, ++ target_schema text, ++ atable text, ++ bTextCast boolean default False ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_insert_ddl text := ''; ++ v_cols text := ''; ++ v_cols_sel text := ''; ++ v_cnt int := 0; ++ v_colrec record; ++ v_schema text; ++ BEGIN ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation, c.is_generated ++ FROM information_schema.columns c WHERE (table_schema, table_name) = (source_schema, atable) ORDER BY ordinal_position ++ LOOP ++ IF v_colrec.udt_schema = 'public' THEN ++ v_schema = 'public'; ++ ELSE ++ v_schema = target_schema; ++ END IF; ++ ++ v_cnt = v_cnt + 1; ++ IF v_colrec.is_identity = 'YES' OR v_colrec.is_generated = 'ALWAYS' THEN ++ -- skip ++ continue; ++ END IF; ++ ++ IF v_colrec.data_type = 'USER-DEFINED' THEN ++ IF v_cols = '' THEN ++ v_cols = v_colrec.column_name; ++ IF bTextCast THEN ++ -- v_cols_sel = v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; ++ IF v_schema = 'public' THEN ++ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ ELSE ++ v_cols_sel = v_colrec.column_name || '::text::' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols = v_cols || ', ' || v_colrec.column_name; ++ IF bTextCast THEN ++ -- v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; ++ IF v_schema = 'public' THEN ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ ELSE ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_colrec.udt_name; ++ END IF; ++ ELSE ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; ++ END IF; ++ END IF; ++ ELSE ++ IF v_cols = '' THEN ++ v_cols = v_colrec.column_name; ++ v_cols_sel = v_colrec.column_name; ++ ELSE ++ v_cols = v_cols || ', ' || v_colrec.column_name; ++ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name; ++ END IF; ++ END IF; ++ END LOOP; ++ ++ -- put it all together and return the insert statement ++ -- INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ v_insert_ddl = 'INSERT INTO ' || target_schema || '.' || atable || ' (' || v_cols || ') ' || 'SELECT ' || v_cols_sel || ' FROM ' || source_schema || '.' || atable || ';'; ++ RETURN v_insert_ddl; ++ END; ++$$; ++ ++ ++CREATE OR REPLACE FUNCTION public.get_table_ddl_complex( ++ src_schema text, ++ dst_schema text, ++ in_table text, ++ sq_server_version_num integer ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ v_table_ddl text; ++ v_buffer1 text; ++ v_buffer2 text; ++ ++ BEGIN ++ IF sq_server_version_num < 110000 THEN ++ SELECT 'CREATE TABLE ' ++ || quote_ident(dst_schema) ++ || '.' 
++ || pc.relname ++ || E'(\n' ++ || string_agg( ++ pa.attname ++ || ' ' ++ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) ++ || coalesce( ++ ' DEFAULT ' ++ || ( ++ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) ++ FROM pg_catalog.pg_attrdef d ++ WHERE d.adrelid = pa.attrelid ++ AND d.adnum = pa.attnum ++ AND pa.atthasdef ++ ), ++ '' ++ ) ++ || ' ' ++ || CASE pa.attnotnull ++ WHEN TRUE THEN 'NOT NULL' ++ ELSE 'NULL' ++ END, ++ E',\n' ++ ) ++ || coalesce( ++ ( ++ SELECT ++ E',\n' ++ || string_agg( ++ 'CONSTRAINT ' ++ || pc1.conname ++ || ' ' ++ || pg_get_constraintdef(pc1.oid), ++ E',\n' ++ ORDER BY pc1.conindid ++ ) ++ FROM pg_constraint pc1 ++ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. ++ WHERE pc1.conrelid = pa.attrelid ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ ++ ELSE ++ SELECT 'CREATE TABLE ' ++ || quote_ident(dst_schema) ++ || '.' ++ || pc.relname ++ || E'(\n' ++ || string_agg( ++ pa.attname ++ || ' ' ++ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) ++ || coalesce( ++ ' DEFAULT ' ++ || ( ++ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) ++ FROM pg_catalog.pg_attrdef d ++ WHERE d.adrelid = pa.attrelid ++ AND d.adnum = pa.attnum ++ AND pa.atthasdef ++ ), ++ '' ++ ) ++ || ' ' ++ || CASE pa.attnotnull ++ WHEN TRUE THEN 'NOT NULL' ++ ELSE 'NULL' ++ END, ++ E',\n' ++ ) ++ || coalesce( ++ ( ++ SELECT ++ E',\n' ++ || string_agg( ++ 'CONSTRAINT ' ++ || pc1.conname ++ || ' ' ++ || pg_get_constraintdef(pc1.oid), ++ E',\n' ++ ORDER BY pc1.conindid ++ ) ++ FROM pg_constraint pc1 ++ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. 
++ WHERE pc1.conrelid = pa.attrelid AND pc1.conparentid = 0 ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ END IF; ++ ++ -- append partition keyword to it ++ SELECT pg_catalog.pg_get_partkeydef(c.oid::pg_catalog.oid) into v_buffer2 ++ FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relname = quote_ident(in_table) COLLATE pg_catalog.default AND n.nspname = quote_ident(src_schema) COLLATE pg_catalog.default; ++ ++ v_table_ddl := v_buffer1 || ') PARTITION BY ' || v_buffer2 || ';'; ++ ++ RETURN v_table_ddl; ++ END; ++$$; ++ ++ ++-- SELECT * FROM public.get_table_ddl('sample', 'address', True); ++CREATE OR REPLACE FUNCTION public.get_table_ddl( ++ in_schema varchar, ++ in_table varchar, ++ bfkeys boolean ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_table_ddl text; ++ ++ -- data about the target table ++ v_table_oid int; ++ ++ -- records for looping ++ v_colrec record; ++ v_constraintrec record; ++ v_indexrec record; ++ v_primary boolean := False; ++ v_constraint_name text; ++ v_src_path_old text := ''; ++ v_src_path_new text := ''; ++ v_dummy text; ++ v_partbound text; ++ v_pgversion int; ++ v_parent text := ''; ++ v_relopts text := ''; ++ v_tablespace text; ++ v_partition_key text := ''; ++ v_temp text; ++ bPartitioned bool := False; ++ bInheritance bool := False; ++ bRelispartition bool; ++ constraintarr text[] := '{{}}'; ++ constraintelement text; ++ bSkip boolean; ++ ++ BEGIN ++ SELECT c.oid, ( ++ SELECT setting ++ FROM pg_settings ++ WHERE name = 'server_version_num') INTO v_table_oid, v_pgversion ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relkind IN ('r', 'p') ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ ++ -- get user-defined tablespaces if applicable ++ SELECT TABLESPACE INTO v_temp ++ FROM pg_tables ++ WHERE schemaname = in_schema ++ AND tablename = in_table ++ AND TABLESPACE IS NOT NULL; ++ -- Issue#99 Fix: simple coding error! ++ -- IF v_tablespace IS NULL THEN ++ IF v_temp IS NULL THEN ++ v_tablespace := 'TABLESPACE pg_default'; ++ ELSE ++ v_tablespace := 'TABLESPACE ' || v_temp; ++ END IF; ++ -- also see if there are any SET commands for this table, ie, autovacuum_enabled=off, fillfactor=70 ++ WITH relopts AS ( ++ SELECT unnest(c.reloptions) relopts ++ FROM pg_class c, pg_namespace n ++ WHERE n.nspname = in_schema ++ AND n.oid = c.relnamespace ++ AND c.relname = in_table ++ ) ++ SELECT string_agg(r.relopts, ', ') AS relopts INTO v_temp ++ FROM relopts r; ++ IF v_temp IS NULL THEN ++ v_relopts := ''; ++ ELSE ++ v_relopts := ' WITH (' || v_temp || ')'; ++ END IF; ++ ++ -- Issue#61 FIX: set search_path = public before we do anything to force explicit schema qualification but dont forget to set it back before exiting... 
++ SELECT setting INTO v_src_path_old FROM pg_settings WHERE name = 'search_path'; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO v_src_path_old ++ FROM pg_settings ++ WHERE name = 'search_path'; ++ -- RAISE INFO 'DEBUG tableddl: saving old search_path: ***%***', v_src_path_old; ++ EXECUTE 'SET search_path = "public"'; ++ SELECT setting INTO v_src_path_new FROM pg_settings WHERE name = 'search_path'; ++ ++ -- grab the oid of the table; https://www.postgresql.org/docs/8.3/catalog-pg-class.html ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'r' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ -- Dont give up yet. It might be a partitioned table ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'p' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ bPartitioned := True; ++ END IF; ++ IF v_pgversion < 100000 THEN ++ SELECT c2.relname parent INTO v_parent ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ bInheritance := True; ++ END IF; ++ ELSE ++ SELECT c2.relname parent, c1.relispartition, pg_get_expr(c1.relpartbound, c1.oid, TRUE) INTO v_parent, bRelispartition, v_partbound ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ IF bRelispartition THEN ++ bInheritance := False; ++ ELSE ++ bInheritance := True; ++ END IF; ++ END IF; ++ END IF; ++ -- RAISE NOTICE 'version=% schema=% parent=% relopts=% tablespace=% partitioned=% inherited=% relispartition=%',v_pgversion, in_schema, v_parent, v_relopts, v_tablespace, bPartitioned, bInheritance, bRelispartition; ++ ++ -- start the create definition ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' (' || E'\n'; ++ ++ -- define all of the columns in the table; https://stackoverflow.com/a/8153081/3068233 ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation ++ FROM information_schema.columns c ++ WHERE (table_schema, table_name) = (in_schema, in_table) ++ ORDER BY ordinal_position ++ LOOP ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || v_colrec.column_name || ' ' ++ -- FIX #82, FIX #100 as well by adding 'citext' to the list ++ -- FIX #105 by overriding the previous fixes (#82, #100), which presumed "public" was always the schema for extensions. It could be a custom schema. 
++ -- so assume udt_schema for all USER-DEFINED datatypes ++ -- || CASE WHEN v_colrec.udt_name in ('geometry', 'box2d', 'box2df', 'box3d', 'geography', 'geometry_dump', 'gidx', 'spheroid', 'valid_detail','citext') ++ -- THEN v_colrec.udt_name ++ || CASE WHEN v_colrec.data_type = 'USER-DEFINED' ++ -- THEN in_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ THEN v_colrec.udt_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ || CASE WHEN v_colrec.is_identity = 'YES' ++ THEN ++ CASE WHEN v_colrec.identity_generation = 'ALWAYS' ++ THEN ' GENERATED ALWAYS AS IDENTITY' ELSE ' GENERATED BY DEFAULT AS IDENTITY' END ELSE '' END ++ || CASE WHEN v_colrec.character_maximum_length IS NOT NULL ++ THEN ('(' || v_colrec.character_maximum_length || ')') ++ WHEN v_colrec.numeric_precision > 0 AND v_colrec.numeric_scale > 0 ++ THEN '(' || v_colrec.numeric_precision || ',' || v_colrec.numeric_scale || ')' ++ ELSE '' END || ' ' ++ || CASE WHEN v_colrec.is_nullable = 'NO' ++ THEN 'NOT NULL' ELSE 'NULL' END ++ || CASE WHEN v_colrec.column_default IS NOT null ++ THEN (' DEFAULT ' || v_colrec.column_default) ELSE '' END ++ || ',' || E'\n'; ++ END LOOP; ++ -- define all the constraints in the; https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html && https://dba.stackexchange.com/a/214877/75296 ++ -- Issue#103: do not get foreign keys for partitions since they are defined on the parent and this will cause an "already exists" error otherwise ++ -- Also conparentid is not in V10, so bypass since we do not have FKEYS in partitioned tables in V10 ++ IF v_pgversion < 110000 THEN ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ ELSE ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ -- Issue#103: 
do not get partitioned tables ++ AND con.conparentid = 0 ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ END IF; ++ ++ -- drop the last comma before ending the create statement ++ v_table_ddl = substr(v_table_ddl, 0, length(v_table_ddl) - 1) || E'\n'; ++ -- end the create table def but add inherits clause if valid ++ IF bPartitioned and bInheritance THEN ++ v_table_ddl := v_table_ddl || ') INHERITS (' || in_schema || '.' || v_parent || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSIF v_pgversion >= 100000 AND bPartitioned and NOT bInheritance THEN ++ -- See if this is a partitioned table (pg_class.relkind = 'p') and add the partitioned key ++ SELECT pg_get_partkeydef (c1.oid) AS partition_key INTO v_partition_key ++ FROM pg_class c1 ++ JOIN pg_namespace n ON (n.oid = c1.relnamespace) ++ LEFT JOIN pg_partitioned_table p ON (c1.oid = p.partrelid) ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.relkind = 'p'; ++ END IF; ++ IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN ++ -- add partition clause ++ -- NOTE: cannot specify default tablespace for partitioned relations ++ v_table_ddl := v_table_ddl || ') PARTITION BY ' || v_partition_key || ';' || E'\n'; ++ ELSIF bPartitioned AND not bInheritance THEN ++ IF v_relopts <> '' THEN ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\n'; ++ ELSE ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' 
|| v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\n'; ++ END IF; ++ ELSIF bPartitioned and bInheritance THEN ++ -- we already did this above ++ v_table_ddl := v_table_ddl; ++ ELSIF v_relopts <> '' THEN ++ v_table_ddl := v_table_ddl || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSE ++ v_table_ddl := v_table_ddl || ') ' || v_tablespace || ';' || E'\n'; ++ END IF; ++ -- suffix create statement with all of the indexes on the table ++ FOR v_indexrec IN ++ SELECT indexdef, indexname ++ FROM pg_indexes ++ WHERE (schemaname, tablename) = (in_schema, in_table) ++ LOOP ++ -- Issue#83 fix: loop through constraints and skip ones already defined ++ bSkip = False; ++ FOREACH constraintelement IN ARRAY constraintarr ++ LOOP ++ IF constraintelement = v_indexrec.indexname THEN ++ bSkip = True; ++ EXIT; ++ END IF; ++ END LOOP; ++ if bSkip THEN CONTINUE; END IF; ++ v_table_ddl := v_table_ddl ++ || v_indexrec.indexdef ++ || ';' || E'\n'; ++ END LOOP; ++ ++ -- reset search_path back to what it was ++ IF v_src_path_old = '' THEN ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ EXECUTE 'SET search_path = ' || v_src_path_old; ++ END IF; ++ -- RAISE NOTICE 'DEBUG tableddl: reset search_path back to ***%***', v_src_path_old; ++ ++ -- return the ddl ++ RETURN v_table_ddl; ++ END; ++$$; + +--- DROP FUNCTION clone_schema(text, text, boolean, boolean); + ++-- Function: clone_schema(text, text, boolean, boolean, boolean) ++-- DROP FUNCTION clone_schema(text, text, boolean, boolean, boolean); ++-- DROP FUNCTION IF EXISTS public.clone_schema(text, text, boolean, boolean); ++ ++DROP FUNCTION IF EXISTS public.clone_schema(text, text, cloneparms[]); + CREATE OR REPLACE FUNCTION public.clone_schema( + source_schema text, + dest_schema text, +- include_recs boolean, +- ddl_only boolean) ++ VARIADIC arr public.cloneparms[] DEFAULT '{{}}':: public.cloneparms[]) + RETURNS void AS + $BODY$ + + -- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one + -- SAMPLE CALL: +--- SELECT clone_schema('public', 'new_schema', True, False); ++-- SELECT clone_schema('sample', 'sample_clone2'); + + DECLARE + src_oid oid; +@@ -32,20 +600,37 @@ + object text; + buffer text; + buffer2 text; ++ buffer3 text; + srctbl text; ++ aname text; + default_ text; + column_ text; + qry text; + ix_old_name text; + ix_new_name text; ++ relpersist text; ++ udt_name text; ++ udt_schema text; ++ bRelispart bool; ++ bChild bool; ++ relknd text; ++ data_type text; ++ ocomment text; ++ adef text; + dest_qry text; + v_def text; ++ part_range text; + src_path_old text; ++ src_path_new text; + aclstr text; ++ -- issue#80 initialize arrays properly ++ tblarray text[] := '{{}}'; ++ tblarray2 text[] := '{{}}'; ++ tblarray3 text[] := '{{}}'; ++ tblelement text; + grantor text; + grantee text; + privs text; +- records_count bigint; + seqval bigint; + sq_last_value bigint; + sq_max_value bigint; +@@ -53,16 +638,28 @@ + sq_increment_by bigint; + sq_min_value bigint; + sq_cache_value bigint; +- sq_is_called boolean; ++ sq_is_called boolean := True; + sq_is_cycled boolean; ++ is_prokind boolean; ++ abool boolean; + sq_data_type text; + sq_cycled char(10); ++ sq_owned text; ++ sq_version text; ++ sq_server_version text; ++ sq_server_version_num integer; ++ bWindows boolean; + arec RECORD; + cnt integer; ++ cnt1 integer; + cnt2 integer; +- seq_cnt integer; ++ cnt3 integer; ++ cnt4 integer; + pos integer; ++ tblscopied integer := 0; ++ l_child integer; + action 
text := 'N/A'; ++ tblname text; + v_ret text; + v_diag1 text; + v_diag2 text; +@@ -70,48 +667,209 @@ + v_diag4 text; + v_diag5 text; + v_diag6 text; ++ v_dummy text; ++ spath text; ++ spath_tmp text; ++ -- issue#86 fix ++ isGenerated text; ++ ++ -- issue#91 fix ++ tblowner text; ++ func_owner text; ++ func_name text; ++ func_args text; ++ func_argno integer; ++ view_owner text; ++ ++ -- issue#92 ++ calleruser text; ++ ++ -- issue#94 ++ bData boolean := False; ++ bDDLOnly boolean := False; ++ bVerbose boolean := False; ++ bDebug boolean := False; ++ bNoACL boolean := False; ++ bNoOwner boolean := False; ++ arglen integer; ++ vargs text; ++ avarg public.cloneparms; ++ ++ -- issue#98 ++ mvarray text[] := '{{}}'; ++ mvscopied integer := 0; ++ ++ -- issue#99 tablespaces ++ tblspace text; ++ ++ -- issue#101 ++ bFileCopy boolean := False; ++ ++ t timestamptz := clock_timestamp(); ++ r timestamptz; ++ s timestamptz; ++ lastsql text := ''; ++ v_version text := '1.19 September 07, 2023'; + + BEGIN ++ -- Make sure NOTICE are shown ++ SET client_min_messages = 'notice'; ++ RAISE NOTICE 'clone_schema version %', v_version; ++ ++ IF 'DEBUG' = ANY ($3) THEN bDebug = True; END IF; ++ IF 'VERBOSE' = ANY ($3) THEN bVerbose = True; END IF; ++ ++ -- IF bVerbose THEN RAISE NOTICE 'START: %',clock_timestamp() - t; END IF; ++ ++ arglen := array_length($3, 1); ++ IF arglen IS NULL THEN ++ -- nothing to do, so defaults are assumed ++ NULL; ++ ELSE ++ -- loop thru args ++ -- IF 'NO_TRIGGERS' = ANY ($3) ++ -- select array_to_string($3, ',', '***') INTO vargs; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arguments=%', $3; END IF; ++ FOREACH avarg IN ARRAY $3 LOOP ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arg=%', avarg; END IF; ++ IF avarg = 'DATA' THEN ++ bData = True; ++ ELSEIF avarg = 'NODATA' THEN ++ -- already set to that by default ++ bData = False; ++ ELSEIF avarg = 'DDLONLY' THEN ++ bDDLOnly = True; ++ ELSEIF avarg = 'NOACL' THEN ++ bNoACL = True; ++ ELSEIF avarg = 'NOOWNER' THEN ++ bNoOwner = True; ++ -- issue#101 fix ++ ELSEIF avarg = 'FILECOPY' THEN ++ bFileCopy = True; ++ END IF; ++ END LOOP; ++ IF bData and bDDLOnly THEN ++ RAISE WARNING 'You can only specify DDLONLY or DATA, but not both.'; ++ RETURN; ++ END IF; ++ END IF; ++ ++ -- Get server version info to handle certain things differently based on the version. ++ SELECT setting INTO sq_server_version ++ FROM pg_settings ++ WHERE name = 'server_version'; ++ SELECT version() INTO sq_version; ++ ++ IF POSITION('compiled by Visual C++' IN sq_version) > 0 THEN ++ bWindows = True; ++ RAISE NOTICE 'Windows: %', sq_version; ++ ELSE ++ bWindows = False; ++ RAISE NOTICE 'Linux: %', sq_version; ++ END IF; ++ SELECT setting INTO sq_server_version_num ++ FROM pg_settings ++ WHERE name = 'server_version_num'; ++ ++ IF sq_server_version_num < 100000 THEN ++ IF sq_server_version_num > 90600 THEN ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. Will try however for PG 9.6...', sq_server_version, sq_server_version_num; ++ ELSE ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. 
You need to be at minimum version 9.6 to at least try', sq_server_version, sq_server_version_num; ++ RETURN; ++ END IF; ++ END IF; + + -- Check that source_schema exists + SELECT oid INTO src_oid +- FROM pg_namespace +- WHERE nspname = quote_ident(source_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(source_schema); ++ + IF NOT FOUND + THEN +- RAISE NOTICE 'source schema % does not exist!', source_schema; ++ RAISE NOTICE ' source schema % does not exist!', source_schema; + RETURN ; + END IF; + ++ -- Check for case-sensitive target schemas and reject them for now. ++ SELECT lower(dest_schema) = dest_schema INTO abool; ++ IF not abool THEN ++ RAISE NOTICE 'Case-sensitive target schemas are not supported at this time.'; ++ RETURN; ++ END IF; ++ + -- Check that dest_schema does not yet exist + PERFORM nspname +- FROM pg_namespace +- WHERE nspname = quote_ident(dest_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(dest_schema); ++ + IF FOUND + THEN +- RAISE NOTICE 'dest schema % already exists!', dest_schema; ++ RAISE NOTICE ' dest schema % already exists!', dest_schema; + RETURN ; + END IF; +- IF ddl_only and include_recs THEN ++ IF bDDLOnly and bData THEN + RAISE WARNING 'You cannot specify to clone data and generate ddl at the same time.'; + RETURN ; + END IF; + ++ -- Issue#92 ++ SELECT current_user into calleruser; ++ + -- Set the search_path to source schema. Before exiting set it back to what it was before. +- SELECT setting INTO src_path_old FROM pg_settings WHERE name='search_path'; ++ -- In order to avoid issues with the special schema name "$user" that may be ++ -- returned unquoted by some applications, we ensure it remains double quoted. ++ -- MJV FIX: #47 ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO src_path_old ++ FROM pg_settings WHERE name = 'search_path'; ++ ++ IF bDebug THEN RAISE NOTICE 'DEBUG: src_path_old=%', src_path_old; END IF; ++ + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- -- RAISE NOTICE 'Using source search_path=%', buffer; ++ SELECT setting INTO src_path_new FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: new search_path=%', src_path_new; END IF; + + -- Validate required types exist. If not, create them. 
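For context, the query below only counts visible definitions of the helper enums obj_type and perm_type so that they are created just once when missing. A minimal standalone sketch of the same guard, assuming to_regtype is available (PG 9.4+; the patch itself does not use it):

  DO $chk$
  BEGIN
    -- create each helper enum only if no visible type of that name exists
    IF to_regtype('obj_type') IS NULL THEN
      CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE');
    END IF;
    IF to_regtype('perm_type') IS NULL THEN
      CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY');
    END IF;
  END
  $chk$;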
+- select a.objtypecnt, b.permtypecnt INTO cnt, cnt2 FROM +- (SELECT count(*) as objtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, +- (SELECT count(*) as permtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ SELECT a.objtypecnt, b.permtypecnt INTO cnt, cnt2 ++ FROM ( ++ SELECT count(*) AS objtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, ( ++ SELECT count(*) AS permtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ + IF cnt = 0 THEN + CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE'); + END IF; +@@ -119,53 +877,148 @@ + CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY'); + END IF; + +- IF ddl_only THEN +- RAISE NOTICE 'Only generating DDL, not actually creating anything...'; ++ -- Issue#95 ++ SELECT pg_catalog.pg_get_userbyid(nspowner) INTO buffer FROM pg_namespace WHERE nspname = quote_ident(source_schema); ++ ++ IF bDDLOnly THEN ++ RAISE NOTICE ' Only generating DDL, not actually creating anything...'; ++ -- issue#95 ++ IF bNoOwner THEN ++ RAISE INFO 'CREATE SCHEMA %;', quote_ident(dest_schema); ++ ELSE ++ RAISE INFO 'CREATE SCHEMA % AUTHORIZATION %;', quote_ident(dest_schema), buffer; ++ END IF; ++ RAISE NOTICE 'SET search_path=%;', quote_ident(dest_schema); ++ ELSE ++ -- issue#95 ++ IF bNoOwner THEN ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ ELSE ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) || ' AUTHORIZATION ' || buffer; ++ END IF; + END IF; + +- IF ddl_only THEN +- RAISE NOTICE '%', 'CREATE SCHEMA ' || quote_ident(dest_schema); ++ -- Do system table validations for subsequent system table queries ++ -- Issue#65 Fix ++ SELECT count(*) into cnt ++ 
FROM pg_attribute ++ WHERE attrelid = 'pg_proc'::regclass AND attname = 'prokind'; ++ ++ IF cnt = 0 THEN ++ is_prokind = False; + ELSE +- EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ is_prokind = True; + END IF; + + -- MV: Create Collations + action := 'Collations'; + cnt := 0; +- FOR arec IN +- SELECT n.nspname as schemaname, a.rolname as ownername , c.collname, c.collprovider, c.collcollate as locale, +- 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ', locale = ''' || c.collcollate || ''');' as COLL_DDL +- FROM pg_collation c JOIN pg_namespace n ON (c.collnamespace = n.oid) JOIN pg_roles a ON (c.collowner = a.oid) WHERE n.nspname = quote_ident(source_schema) order by c.collname +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.coll_ddl; +- ELSE +- EXECUTE arec.coll_ddl; +- END IF; +- END; +- END LOOP; ++ -- Issue#96 Handle differently based on PG Versions (PG15 rely on colliculocale, not collcolocate) ++ -- perhaps use this logic instead: COALESCE(c.collcollate, c.colliculocale) AS lc_collate, COALESCE(c.collctype, c.colliculocale) AS lc_type ++ IF sq_server_version_num > 150000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.colliculocale || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSIF sq_server_version_num > 100000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSE ++ -- handle 9.6 that is missing some columns in pg_collation (no collprovider, so omit the provider clause entirely) ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ END IF; + RAISE NOTICE ' COLLATIONS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV:
Create Domains + action := 'Domains'; + cnt := 0; + FOR arec IN +- SELECT n.nspname as "Schema", t.typname as "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "Type", +- (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt WHERE c.oid = t.typcollation AND +- bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as "Collation", +- CASE WHEN t.typnotnull THEN 'not null' END as "Nullable", t.typdefault as "Default", +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') as "Check", +- 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || +- CASE WHEN t.typnotnull IS NOT NULL THEN ' NOT NULL ' ELSE ' ' END || CASE WHEN t.typdefault IS NOT NULL THEN 'DEFAULT ' || t.typdefault || ' ' ELSE ' ' END || +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL +- FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2 ++ SELECT n.nspname AS "Schema", t.typname AS "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) AS "Type", ( ++ SELECT c.collname ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt ++ WHERE c.oid = t.typcollation ++ AND bt.oid = t.typbasetype ++ AND t.typcollation <> bt.typcollation) AS "Collation", CASE WHEN t.typnotnull THEN ++ 'not null' ++ END AS "Nullable", t.typdefault AS "Default", pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.typename ++ WHERE t.oid = r.contypid), ' ') AS "Check", 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || quote_ident(t.typname) || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || ++ CASE WHEN t.typnotnull IS NOT NULL THEN ++ ' NOT NULL ' ++ ELSE ++ ' ' ++ END || CASE WHEN t.typdefault IS NOT NULL THEN ++ 'DEFAULT ' || t.typdefault || ' ' ++ ELSE ++ ' ' ++ END || pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE t.typtype = 'd' ++ AND n.nspname = quote_ident(source_schema) ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ ORDER BY 1, 2 + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.dom_ddl; + ELSE + EXECUTE arec.dom_ddl; +@@ -177,36 +1030,70 @@ + -- MV: Create types + action := 'Types'; + cnt := 0; ++ lastsql = ''; + FOR arec IN +- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, CASE WHEN t.typcategory='C' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ORDER BY c.relname, a.attnum),', ') || ');' +- WHEN t.typcategory='E' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' 
|| t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder),',')), ',', ''',''') || ');' +- ELSE '' END AS type_ddl FROM pg_type t JOIN pg_namespace n ON (n.oid = t.typnamespace) +- LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) +- LEFT JOIN pg_class c ON (c.reltype = t.oid) LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) +- WHERE n.nspname = quote_ident(source_schema) and (c.relkind IS NULL or c.relkind = 'c') and t.typcategory in ('C', 'E') group by 1,2,3,4 order by n.nspname, t.typcategory, t.typname ++ -- Fixed Issue#108:enclose double-quote roles with special characters for setting "OWNER TO" ++ -- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, pg_catalog.pg_get_userbyid(t.typowner) AS owner, CASE WHEN t.typcategory = 'C' THEN ++ SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, '"' || pg_catalog.pg_get_userbyid(t.typowner) || '"' AS owner, CASE WHEN t.typcategory = 'C' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ++ ORDER BY c.relname, a.attnum), ', ') || ');' ++ WHEN t.typcategory = 'E' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder), ',')), ',', ''',''') || ');' ++ ELSE ++ '' ++ END AS type_ddl ++ FROM pg_type t ++ JOIN pg_namespace n ON (n.oid = t.typnamespace) ++ LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) ++ LEFT JOIN pg_class c ON (c.reltype = t.oid) ++ LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ AND (c.relkind IS NULL ++ OR c.relkind = 'c') ++ AND t.typcategory IN ('C', 'E') ++ GROUP BY 1, 2, 3, 4, 5 ++ ORDER BY n.nspname, t.typcategory, t.typname ++ + LOOP + BEGIN + cnt := cnt + 1; + -- Keep composite and enum types in separate branches for fine tuning later if needed. + IF arec.typcategory = 'E' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN +- RAISE INFO '%', arec.type_ddl; +- ELSE +- EXECUTE arec.type_ddl; +- END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.type_ddl; ++ ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; ++ ELSE ++ EXECUTE arec.type_ddl; + +- ELSEIF arec.typcategory = 'C' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' || arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; ++ END IF; ++ ELSIF arec.typcategory = 'C' THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; + ELSE + EXECUTE arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' 
|| arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; + END IF; + ELSE +- RAISE NOTICE 'Unhandled type:%-%', arec.typcategory, arec.typname; ++ RAISE NOTICE ' Unhandled type:%-%', arec.typcategory, arec.typname; + END IF; + END; + END LOOP; +@@ -214,82 +1101,361 @@ + + -- Create sequences + action := 'Sequences'; +- seq_cnt := 0; +- -- TODO: Find a way to make this sequence's owner is the correct table. +- FOR object IN +- SELECT sequence_name::text +- FROM information_schema.sequences +- WHERE sequence_schema = quote_ident(source_schema) ++ ++ cnt := 0; ++ -- fix#63 get from pg_sequences not information_schema ++ -- fix#63 take 2: get it from information_schema.sequences since we need to treat IDENTITY columns differently. ++ -- fix#95 get owner as well by joining to pg_sequences ++ -- fix#106 we can get owner info with pg_class, pg_user/pg_group, and information_schema.sequences, so we can avoid the hit to pg_sequences which is not available in 9.6 ++ FOR object, buffer IN ++ -- Fixed Issue#108: ++ -- SELECT s1.sequence_name::text, s2.sequenceowner FROM information_schema.sequences s1 JOIN pg_sequences s2 ON (s1.sequence_schema = s2.schemaname AND s1.sequence_name = s2.sequencename) AND s1.sequence_schema = quote_ident(source_schema) ++ -- SELECT s.sequence_name::text, '"' || u.usename || '"' as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_user u ON (c.relowner = u.usesysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ -- UNION SELECT s.sequence_name::text, g.groname as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_group g ON (c.relowner = g.grosysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ SELECT sequencename::text, sequenceowner FROM pg_catalog.pg_sequences WHERE schemaname = quote_ident(source_schema) + LOOP +- seq_cnt := seq_cnt + 1; +- IF ddl_only THEN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ -- issue#95 + RAISE INFO '%', 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';'; ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO '%', 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer || ';'; ++ END IF; + ELSE + EXECUTE 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object); ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer; ++ END IF; + END IF; + srctbl := quote_ident(source_schema) || '.' || quote_ident(object); + +- EXECUTE 'SELECT last_value, is_called +- FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';' +- INTO sq_last_value, sq_is_called; +- +- EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type +- FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' +- INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type ; ++ IF sq_server_version_num < 100000 THEN ++ EXECUTE 'SELECT last_value, is_called FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(object) || ';' INTO sq_last_value, sq_is_called; ++ EXECUTE 'SELECT maximum_value, start_value, increment, minimum_value, 1 cache_size, cycle_option, data_type ++ FROM information_schema.sequences WHERE sequence_schema='|| quote_literal(source_schema) || ' AND sequence_name=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- IF sq_is_cycled +- THEN +- sq_cycled := 'CYCLE'; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; + ELSE +- sq_cycled := 'NO CYCLE'; +- END IF; ++ EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type, COALESCE(last_value, 1) ++ FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type, sq_last_value; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) +- || ' AS ' || sq_data_type +- || ' INCREMENT BY ' || sq_increment_by +- || ' MINVALUE ' || sq_min_value +- || ' MAXVALUE ' || sq_max_value +- || ' START WITH ' || sq_start_value +- || ' RESTART ' || sq_min_value +- || ' CACHE ' || sq_cache_value +- || ' ' || sq_cycled || ' ;' ; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' AS ' || sq_data_type ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; ++ END IF; + +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- IF include_recs THEN ++ IF bData THEN + EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- if ddl_only THEN +- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ if bDDLOnly THEN ++ -- fix#63 ++ -- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ -- fix#63 ++ -- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + END IF; + + END IF; + END LOOP; +- RAISE NOTICE ' SEQUENCES cloned: %', LPAD(seq_cnt::text, 5, ' '); ++ RAISE NOTICE ' SEQUENCES cloned: %', LPAD(cnt::text, 5, ' '); ++ + +--- Create tables ++ -- Create tables including partitioned ones (parent/children) and unlogged ones. Order by is critical since child partition range logic is dependent on it. + action := 'Tables'; +- cnt := 0; +- FOR object IN +- SELECT TABLE_NAME::text +- FROM information_schema.tables +- WHERE table_schema = quote_ident(source_schema) +- AND table_type = 'BASE TABLE' ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; + ++ cnt := 0; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path to empty string:%', v_dummy; END IF; ++ -- Fix#86 add isgenerated to column list ++ -- Fix#91 add tblowner for setting the table ownership to that of the source ++ -- Fix#99 added join to pg_tablespace ++ ++ -- Handle PG versions greater than last major/minor version of PG 9.6.24 ++ IF sq_server_version_num > 90624 THEN ++ FOR tblname, relpersist, bRelispart, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- fixed #108 by enclosing owner in double quotes to avoid errors for bad characters like #.@... 
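For context on the #105 and #108 notes above: DISTINCT ON keeps a single row per table even when the join against information_schema.columns matches several USER-DEFINED columns, and the owner is wrapped in double quotes so the later OWNER TO statements survive role names with punctuation. A self-contained illustration (the table values and role name are made up for demonstration):

  -- #105: DISTINCT ON collapses duplicate join rows to one row per key
  SELECT DISTINCT ON (tbl) tbl, coltype
  FROM (VALUES ('address','udt_myint'), ('address','udt_myaddr'), ('city',NULL)) AS v(tbl, coltype)
  ORDER BY tbl, coltype;
  -- returns one 'address' row and one 'city' row
  -- #108: unquoted punctuation breaks DDL, so the owner is emitted double-quoted:
  --   ALTER TABLE IF EXISTS clone1.address OWNER TO "owner#7";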
++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' || quote_ident(object); +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ lastsql = ''; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ bChild := True; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=% relkind=% bChild=%',tblname, bRelispart, relknd, bChild; END IF; ++ ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; ++ END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild OR bRelispart) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was regression testing previously only worked on one partition table clone case ++ AND c2.relname = tblname AND ++ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', ++ c1.oid::pg_catalog.regclass::pg_catalog.text ++ LOOP ++ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; ++ -- issue#91, not sure if we need to do this for child tables ++ -- issue#95 we dont set ownership here ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ ELSE ++ EXECUTE qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ END IF; ++ END LOOP; + END IF; + + -- INCLUDING ALL creates new index names, we restore them to the old name. +@@ -300,171 +1466,975 @@ + WHERE old.schemaname = source_schema + AND new.schemaname = dest_schema + AND old.tablename = new.tablename +- AND old.tablename = object ++ AND old.tablename = tblname + AND old.indexname <> new.indexname + AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') +- ORDER BY old.indexname, new.indexname ++ ORDER BY old.indexdef, new.indexdef + LOOP +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; + ELSE +- EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ -- The SELECT query above may return duplicate names when a column is ++ -- indexed twice the same manner with 2 different names. Therefore, to ++ -- avoid a 'relation "xxx" already exists' we test if the index name ++ -- is in use or free. Skipping existing index will fallback on unused ++ -- ones and every duplicate will be mapped to distinct old names. ++ IF NOT EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_old_name)) ++ AND EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_new_name)) ++ THEN ++ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ END IF; + END IF; + END LOOP; + +- records_count := 0; +- IF include_recs +- THEN ++ lastsql = ''; ++ IF bData THEN + -- Insert records from source table +- RAISE NOTICE 'Populating cloned table, %', buffer; +- EXECUTE 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';'; +- +- -- restart the counter for PK's internal identity sequence +- EXECUTE 'SELECT count(*) FROM ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';' INTO records_count; +- FOR column_ IN +- SELECT column_name::text +- FROM information_schema.columns +- WHERE +- table_schema = dest_schema AND +- table_name = object AND +- is_identity = 'YES' +- LOOP +- EXECUTE 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' ALTER COLUMN ' || quote_ident(column_) || ' RESTART WITH ' || records_count + 1 || ';'; +- END LOOP; ++ ++ -- 2021-03-03 MJV FIX ++ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(tblname); ++ ++ -- 2020/06/18 - Issue #31 fix: add "OVERRIDING SYSTEM VALUE" for IDENTITY columns marked as GENERATED ALWAYS. ++ select count(*) into cnt2 from pg_class c, pg_attribute a, pg_namespace n ++ where a.attrelid = c.oid and c.relname = quote_ident(tblname) and n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and a.attidentity = 'a'; ++ buffer3 := ''; ++ IF cnt2 > 0 THEN ++ buffer3 := ' OVERRIDING SYSTEM VALUE'; ++ END IF; ++ -- BUG for inserting rows from tables with user-defined columns ++ -- INSERT INTO sample_clone.address OVERRIDING SYSTEM VALUE SELECT * FROM sample.address; ++ -- ERROR: column "id2" is of type sample_clone.udt_myint but expression is of type udt_myint ++ ++ -- Issue#86 fix: ++ -- IF data_type = 'USER-DEFINED' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=% buffer3=%', tblname, data_type, isGenerated, buffer3; END IF; ++ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN ++ ++ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. You must copy them manually.', tblname; ++ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; ++ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ ++ -- Issue#79 implementation follows ++ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; ++ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); ++ -- Issue#101 fix: use text cast to get around the problem. ++ IF bFileCopy THEN ++ IF bWindows THEN ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ END IF; ++ ELSE ++ -- Issue#101: assume direct copy with text cast, add to separate array ++ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); ++ tblarray3 := tblarray3 || buffer3; ++ END IF; ++ ELSE ++ -- bypass child tables since we populate them when we populate the parents ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=% relknd=% l_child=% bChild=%', tblname, bRelispart, relknd, l_child, bChild; END IF; ++ IF NOT bRelispart AND NOT bChild THEN ++ -- Issue#75: Must defer population of tables until child tables have been added to parents ++ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. ++ -- Issue#106 buffer3 shouldn't be in the mix ++ -- revisited: buffer3 should be in play for PG versions that handle IDENTITIES ++ buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ -- buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; ++ IF bFileCopy THEN ++ tblarray2:= tblarray2 || buffer2; ++ ELSE ++ tblarray := tblarray || buffer2; ++ END IF; ++ END IF; ++ END IF; + END IF; + +- SET search_path = ''; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ + FOR column_, default_ IN + SELECT column_name::text, +- REPLACE(column_default::text, source_schema, dest_schema) +- FROM information_schema.COLUMNS +- WHERE table_schema = source_schema +- AND TABLE_NAME = object +- AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' ++ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') ++ FROM information_schema.COLUMNS ++ WHERE table_schema = source_schema ++ AND TABLE_NAME = tblname ++ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' + LOOP +- IF ddl_only THEN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name ++ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; ++ IF bDDLOnly THEN + -- May need to come back and revisit this since previous sql will not return anything since no schema as created! +- RAISE INFO '%', 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_ || ';'; ++ RAISE INFO '%', buffer2; + ELSE +- EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_; ++ EXECUTE buffer2; + END IF; + END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + END LOOP; +- RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- add FK constraint +- action := 'FK Constraints'; +- cnt := 0; +- SET search_path = ''; +- FOR qry IN +- SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' 
|| quote_ident(rn.relname) +- || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' ||quote_ident(source_schema), 'REFERENCES ' || quote_ident(dest_schema)) || ';' +- FROM pg_constraint ct +- JOIN pg_class rn ON rn.oid = ct.conrelid +- WHERE connamespace = src_oid +- AND rn.relkind = 'r' +- AND ct.contype = 'f' ++ ELSE ++ -- Handle 9.6 versions 90600 ++ FOR tblname, relpersist, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ -- Fixed Issue#108: double quote roles to avoid problems with special characters in OWNER TO statements ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', qry; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE qry; ++ bChild := True; + END IF; +- END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); +- +--- Create views +- action := 'Views'; +- cnt := 0; +- FOR object IN +- SELECT table_name::text, +- view_definition +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- SELECT view_definition INTO v_def +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- AND table_name = quote_ident(object); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=NA relkind=% bChild=%',tblname, relknd, bChild; END IF; + +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; + END IF; +- END LOOP; +- RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- Create Materialized views +- action := 'Mat. Views'; +- cnt := 0; +- FOR object IN +- SELECT matviewname::text, +- definition +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := dest_schema || '.' || quote_ident(object); +- SELECT replace(definition,';','') INTO v_def +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- AND matviewname = quote_ident(object); +- +- IF include_recs THEN +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- ELSE +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; + +- END IF; ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
++ -- The problem was that regression testing previously covered only one partitioned-table clone case.
++                 AND c2.relname = tblname AND
++                 c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT',
++                 c1.oid::pg_catalog.regclass::pg_catalog.text
++         LOOP
++             qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';';
++             IF bDebug THEN RAISE NOTICE 'DEBUG: %', qry; END IF;
++             -- issue#91, not sure if we need to do this for child tables
++             -- issue#95: we don't set ownership here
++             IF bDDLOnly THEN
++                 RAISE INFO '%', qry;
++                 IF NOT bNoOwner THEN
++                     NULL;
++                 END IF;
++             ELSE
++                 EXECUTE qry;
++                 IF NOT bNoOwner THEN
++                     NULL;
++                 END IF;
++             END IF;
++         END LOOP;
++     END IF;
++
++     -- INCLUDING ALL creates new index names; we restore them to the old names.
++     -- There should be no conflicts since they live in different schemas.
++     FOR ix_old_name, ix_new_name IN
++         SELECT old.indexname, new.indexname
++         FROM pg_indexes old, pg_indexes new
++         WHERE old.schemaname = source_schema
++             AND new.schemaname = dest_schema
++             AND old.tablename = new.tablename
++             AND old.tablename = tblname
++             AND old.indexname <> new.indexname
++             AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','')
++         ORDER BY old.indexdef, new.indexdef
++     LOOP
++         lastsql = '';
++         IF bDDLOnly THEN
++             RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';';
++         ELSE
++             -- The SELECT query above may return duplicate names when a column is
++             -- indexed twice in the same manner under two different names. Therefore, to
++             -- avoid a 'relation "xxx" already exists' error, we test whether the index name
++             -- is in use or free. Skipping existing indexes falls back on unused
++             -- ones, and every duplicate is mapped to a distinct old name.
++             IF NOT EXISTS (
++                     SELECT TRUE
++                     FROM pg_indexes
++                     WHERE schemaname = dest_schema
++                         AND tablename = tblname
++                         AND indexname = quote_ident(ix_old_name))
++                 AND EXISTS (
++                     SELECT TRUE
++                     FROM pg_indexes
++                     WHERE schemaname = dest_schema
++                         AND tablename = tblname
++                         AND indexname = quote_ident(ix_new_name))
++             THEN
++                 EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';';
++             END IF;
++         END IF;
++     END LOOP;
+
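++ -- Illustrative sketch (not part of the original patch; all names below are made up):
++ -- for a hypothetical parent sample.mytbl cloned into clone1, the two kinds of
++ -- statements generated above would look roughly like:
++ --   ALTER TABLE ONLY clone1.mytbl ATTACH PARTITION clone1.mytbl_2021 FOR VALUES FROM ('2021-01-01') TO ('2022-01-01');
++ --   ALTER INDEX clone1.mytbl_2021_id_idx1 RENAME TO mytbl_2021_id_idx;
++ -- The real partition bounds and index names come from the pg_inherits and
++ -- pg_indexes lookups in the loops above.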
++     IF bData THEN
++         -- Insert records from source table
++
++         -- 2021-03-03 MJV FIX
++         buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname);
++
++         -- Issue#86 fix:
++         -- IF data_type = 'USER-DEFINED' THEN
++         IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=%', tblname, data_type, isGenerated; END IF;
++         IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN
++
++             -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. You must copy them manually.', tblname;
++             -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address;
++             -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address;
++
++             -- Issue#79 implementation follows
++             -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ',';
++             -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL '');
++             -- Issue#101 fix: use a text cast to get around the problem.
++             IF bFileCopy THEN
++                 IF bWindows THEN
++                     buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';';
++                     tblarray2 := tblarray2 || buffer2;
++                     -- Issue #81 reformat COPY command for upload
++                     -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');';
++                     buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);';
++                     tblarray2 := tblarray2 || buffer2;
++                 ELSE
++                     buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';';
++                     tblarray2 := tblarray2 || buffer2;
++                     -- Issue #81 reformat COPY command for upload
++                     -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');';
++                     -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ;
++                     buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);';
++                     tblarray2 := tblarray2 || buffer2;
++                 END IF;
++             ELSE
++                 -- Issue#101: assume direct copy with a text cast; add to a separate array
++                 SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True);
++                 tblarray3 := tblarray3 || buffer3;
++             END IF;
++         ELSE
++             -- bypass child tables since we populate them when we populate the parents
++             IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=NA relknd=% l_child=% bChild=%', tblname, relknd, l_child, bChild; END IF;
++
++             IF NOT bChild THEN
++                 -- Issue#75: must defer population of tables until child tables have been added to parents
++                 -- Issue#101: offer the alternative of a copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases as a potential performance improvement for large tables.
++                 -- buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';';
++                 buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';';
++                 IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF;
++                 IF bFileCopy THEN
++                     tblarray2:= tblarray2 || buffer2;
++                 ELSE
++                     tblarray := tblarray || buffer2;
++                 END IF;
++             END IF;
++         END IF;
++     END IF;
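++ -- Illustrative sketch (not part of the original patch) of the three row-copy
++ -- paths queued above, using made-up names sample.address -> clone1.address:
++ --   tblarray  : INSERT INTO clone1.address SELECT * FROM sample.address;
++ --   tblarray2 : COPY sample.address TO '/tmp/cloneschema.tmp' WITH DELIMITER AS ',';
++ --               COPY clone1.address FROM '/tmp/cloneschema.tmp' (DELIMITER ',', NULL '\N', FORMAT CSV);
++ --   tblarray3 : INSERT INTO clone1.address (id, addr) SELECT id::text::clone1.udt_myint, addr FROM sample.address;  -- text cast for UDT columns
++ -- Per the Issue#75 comment, these arrays are executed later, once child tables
++ -- have been attached to their parents.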
++
++     -- Issue#61 FIX: use set_config for empty string
++     -- SET search_path = '';
++     SELECT set_config('search_path', '', false) into v_dummy;
++
++     FOR column_, default_ IN
++         SELECT column_name::text,
++             REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.')
++         FROM information_schema.COLUMNS
++         WHERE table_schema = source_schema
++             AND TABLE_NAME = tblname
++             AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)'
++     LOOP
++         -- Issue#78 FIX: handle case-sensitive names with quote_ident() on the column name
++         buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';';
++         IF bDDLOnly THEN
++             -- May need to come back and revisit this, since the previous SQL will not return anything because no schema was created!
++             RAISE INFO '%', buffer2;
++         ELSE
++             EXECUTE buffer2;
++         END IF;
+      END LOOP;
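++ -- Worked example (illustrative only; made-up names): a source column default of
++ --   nextval('sample.address_id_seq'::regclass)
++ -- is rewritten by the REPLACE() above and reapplied in the clone as:
++ --   ALTER TABLE clone1.address ALTER COLUMN id SET DEFAULT nextval('clone1.address_id_seq'::regclass);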
+-   RAISE NOTICE '    MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' ');
+
+--- Create functions
+-   action := 'Functions';
++     EXECUTE 'SET search_path = ' || quote_ident(source_schema) ;
++   END LOOP;
++   END IF;
++   -- end of 90600 branch
++
++   RAISE NOTICE '      TABLES cloned: %', LPAD(cnt::text, 5, ' ');
++
++   SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path';
++   IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF;
++
++   -- Assigning sequences to table columns.
++   action := 'Sequences assigning';
+    cnt := 0;
+-   FOR func_oid IN
+-       SELECT oid
+-       FROM pg_proc
+-       WHERE pronamespace = src_oid
++   FOR object IN
++       SELECT sequence_name::text
++       FROM information_schema.sequences
++       WHERE sequence_schema = quote_ident(source_schema)
+    LOOP
+        cnt := cnt + 1;
+-       SELECT pg_get_functiondef(func_oid) INTO qry;
+-       SELECT replace(qry, source_schema, dest_schema) INTO dest_qry;
+-       IF ddl_only THEN
+-           RAISE INFO '%', dest_qry;
++       srctbl := quote_ident(source_schema) || '.' || quote_ident(object);
++
++       -- Get the owning column, inspired by Sadique Ali's post at:
++       -- https://sadique.io/blog/2019/05/07/viewing-sequence-ownership-information-in-postgres/
++       -- Fixed via pull request#109
++       SELECT ' OWNED BY '
++           || quote_ident(dest_schema)
++           || '.'
++           || quote_ident(dc.relname)
++           || '.'
++           || quote_ident(a.attname)
++       INTO sq_owned
++       FROM pg_class AS c
++           JOIN pg_namespace n ON c.relnamespace = n.oid
++           JOIN pg_depend AS d ON c.relfilenode = d.objid
++           JOIN pg_class AS dc ON (
++               d.refobjid = dc.relfilenode
++               AND dc.relnamespace = n.oid
++           )
++           JOIN pg_attribute AS a ON (
++               a.attnum = d.refobjsubid
++               AND a.attrelid = d.refobjid
++           )
++       WHERE n.nspname = quote_ident(source_schema)
++           AND c.relkind = 'S'
++           AND c.relname = object;
++
++       IF sq_owned IS NOT NULL THEN
++           qry := 'ALTER SEQUENCE '
++               || quote_ident(dest_schema)
++               || '.'
++               || quote_ident(object)
++               || sq_owned
++               || ';';
++
++           IF bDDLOnly THEN
++               IF bDebug THEN RAISE NOTICE 'DEBUG: %', qry; END IF;
++               RAISE INFO '%', qry;
++           ELSE
++               EXECUTE qry;
++           END IF;
++
++       END IF;
++
++   END LOOP;
++   RAISE NOTICE '   SEQUENCES set: %', LPAD(cnt::text, 5, ' ');
++
++   -- Update IDENTITY sequences to the last value, bypassing 9.6 versions
++   IF sq_server_version_num > 90624 THEN
++       action := 'Identity updating';
++       cnt := 0;
++       FOR object, sq_last_value IN
++           SELECT sequencename::text, COALESCE(last_value, -999) from pg_sequences where schemaname = quote_ident(source_schema)
++               AND NOT EXISTS
++               (select 1 from information_schema.sequences where sequence_schema = quote_ident(source_schema) and sequence_name = sequencename)
++       LOOP
++           IF sq_last_value = -999 THEN
++               continue;
++           END IF;
++           cnt := cnt + 1;
++           buffer := quote_ident(dest_schema) || '.' || quote_ident(object);
++           IF bData THEN
++               EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++           ELSE
++               IF bDDLOnly THEN
++                   -- fix#63
++                   RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++               ELSE
++                   -- fix#63
++                   EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++               END IF;
++           END IF;
++       END LOOP;
++       -- Fixed Issue#107: set lpad from 2 to 5
++       RAISE NOTICE '  IDENTITIES set: %', LPAD(cnt::text, 5, ' ');
++   ELSE
++       -- Fixed Issue#107: set lpad from 2 to 5
++       RAISE NOTICE '  IDENTITIES set: %', LPAD('-1'::text, 5, ' ');
++   END IF;
++
++   -- Issue#78 forces us to defer FKeys until the end, since we previously did row copies before FKeys
++   -- add FK constraint
++   -- action := 'FK Constraints';
++
++   -- Issue#62: index comments were added here originally, then removed and reworked later below.
++
++   -- Issue#90: moved functions up here, before views or MVs that might use them
++   -- Create functions
++   action := 'Functions';
++   cnt := 0;
++   -- MJV FIX per issue# 34
++   -- SET search_path = '';
++   EXECUTE 'SET search_path = ' || quote_ident(source_schema) ;
++
++   -- Fixed Issue#65
++   -- Fixed Issue#97
++   -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind != 'a'
++   IF is_prokind THEN
++   FOR func_oid, func_owner, func_name, func_args, func_argno, buffer3 IN
++       SELECT p.oid, pg_catalog.pg_get_userbyid(p.proowner), p.proname, oidvectortypes(p.proargtypes), p.pronargs,
++           CASE WHEN prokind = 'p' THEN 'PROCEDURE' WHEN prokind = 'f' THEN 'FUNCTION' ELSE '' END
++       FROM pg_proc p WHERE p.pronamespace = src_oid AND p.prokind != 'a'
++   LOOP
++       cnt := cnt + 1;
++       SELECT pg_get_functiondef(func_oid)
++       INTO qry;
++
++       SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry;
++       IF bDDLOnly THEN
++           RAISE INFO '%;', dest_qry;
++           -- Issue#91 Fix
++           -- issue#95
++           IF NOT bNoOwner THEN
++               IF func_argno = 0 THEN
++                   -- Fixed Issue#108: double-quote roles in case they have special characters
++                   RAISE INFO 'ALTER % %() OWNER TO %', buffer3, quote_ident(dest_schema) || '.' || quote_ident(func_name), '"' || func_owner || '";';
++               ELSE
++                   -- Fixed Issue#108: double-quote roles in case they have special characters
++                   RAISE INFO 'ALTER % % OWNER TO %', buffer3, quote_ident(dest_schema) || '.'
|| quote_ident(func_name) || '(' || func_args || ')', '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %', dest_qry; END IF; ++ EXECUTE dest_qry; ++ ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ IF func_argno = 0 THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '() OWNER TO ' || '"' || func_owner || '";'; ++ ELSE ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '(' || func_args || ') OWNER TO ' || '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ ELSE ++ FOR func_oid IN SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND not proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT pg_get_functiondef(func_oid) INTO qry; ++ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ END IF; ++ ++ -- Create aggregate functions. ++ -- Fixed Issue#65 ++ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind = 'a' ++ IF is_prokind THEN ++ FOR func_oid IN ++ SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND prokind = 'a' ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' ++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ + ELSE +- EXECUTE dest_qry; ++ FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' 
++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); + END IF; + ++ -- Create views ++ action := 'Views'; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- MJV FIX #43: also had to reset search_path from source schema to empty. ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) ++ INTO v_dummy; ++ ++ cnt := 0; ++ --FOR object IN ++ -- SELECT table_name::text, view_definition ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ ++ -- Issue#73 replace loop query to handle dependencies ++ -- Issue#91 get view_owner ++ FOR srctbl, aname, view_owner, object IN ++ WITH RECURSIVE views AS ( ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm' AS is_materialized, pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ 1 AS level ++ FROM pg_depend AS d ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ UNION ++ -- add the views that depend on these ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm', pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ views.level + 1 ++ FROM views ++ JOIN pg_depend AS d ++ ON d.refobjid = views.viewname ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ AND v.oid <> views.viewname ++ ) ++ SELECT tablename, viewname, owner, format('CREATE OR REPLACE%s VIEW %s AS%s', ++ CASE WHEN is_materialized ++ THEN ' MATERIALIZED' ++ ELSE '' ++ END, ++ viewname, ++ pg_get_viewdef(viewname)) ++ FROM views ++ WHERE schemaname = quote_ident(source_schema) ++ GROUP BY schemaname, tablename, viewname, owner, is_materialized ++ ORDER BY max(level), schemaname, tablename ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#73 replace logic based on new loop sql ++ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(aname); ++ -- MJV FIX: #43 ++ -- SELECT view_definition INTO v_def ++ -- SELECT REPLACE(view_definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ -- AND table_name = quote_ident(object); ++ SELECT REPLACE(object, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def; ++ -- NOTE: definition already includes the closing statement semicolon ++ SELECT REPLACE(aname, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer3; ++ IF bDDLOnly THEN ++ RAISE INFO '%', v_def; ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ -- RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, view_owner || ';'; ++ RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, '"' ||view_owner || '";'; ++ END IF; ++ ELSE ++ -- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def; ++ EXECUTE v_def; ++ -- Issue#73: commented out comment logic for views since we do it elsewhere now. ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ v_def = 'ALTER TABLE ' || buffer3 || ' OWNER TO ' || '"' || view_owner || '";'; ++ EXECUTE v_def; ++ END IF; ++ END IF; + END LOOP; +- RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Create Materialized views ++ action := 'Mat. Views'; ++ cnt := 0; ++ -- Issue#91 get view_owner ++ FOR object, view_owner, v_def IN ++ SELECT matviewname::text, '"' || matviewowner::text || '"', replace(definition,';','') FROM pg_catalog.pg_matviews WHERE schemaname = quote_ident(source_schema) ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on target schema and object ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); ++ ++ -- MJV FIX: #72 remove source schema in MV def ++ SELECT REPLACE(v_def, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer2; ++ ++ IF bData THEN ++ -- issue#98 defer creation until after regular tables are populated. Also defer the ownership as well. 
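++ -- Deferral sketch (illustrative only; 'clone1.mv_sales' is a made-up name): with
++ -- bData = true, the MV DDL and ownership statements are queued in mvarray, e.g.
++ --   mvarray := mvarray || 'CREATE MATERIALIZED VIEW clone1.mv_sales AS SELECT ... WITH DATA;';
++ --   mvarray := mvarray || 'ALTER MATERIALIZED VIEW clone1.mv_sales OWNER TO "someowner";';
++ -- and presumably replayed with EXECUTE later, after the base tables are populated.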
++       -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;' ;
++       buffer3 = 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;';
++       mvarray := mvarray || buffer3;
++
++       -- issue#95
++       IF NOT bNoOwner THEN
++           -- buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ;
++           -- EXECUTE buffer3;
++           -- Fixed Issue#108: double-quote roles in case they have special characters
++           buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ;
++           mvarray := mvarray || buffer3;
++       END IF;
++   ELSE
++       IF bDDLOnly THEN
++           RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ;
++           -- Issue#91
++           -- issue#95
++           IF NOT bNoOwner THEN
++               -- Fixed Issue#108: double-quote roles in case they have special characters
++               RAISE INFO '%', 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ;
++           END IF;
++       ELSE
++           EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ;
++           -- Issue#91
++           -- issue#95
++           IF NOT bNoOwner THEN
++               -- Fixed Issue#108: double-quote roles in case they have special characters
++               buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ;
++               EXECUTE buffer3;
++           END IF;
++       END IF;
++   END IF;
++   SELECT coalesce(obj_description(oid), '') into adef from pg_class where relkind = 'm' and relname = object;
++   IF adef <> '' THEN
++       IF bDDLOnly THEN
++           RAISE INFO '%', 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';';
++       ELSE
++           -- Issue#98: also defer if copy rows is on, since we defer MVIEWS in that case
++           IF bData THEN
++               buffer3 = 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';';
++               mvarray = mvarray || buffer3;
++           ELSE
++               EXECUTE 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';';
++           END IF;
++
++       END IF;
++   END IF;
++
++   FOR aname, adef IN
++       SELECT indexname, replace(indexdef, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') as newdef FROM pg_indexes where schemaname = quote_ident(source_schema) and tablename = object order by indexname
++   LOOP
++       IF bDDLOnly THEN
++           RAISE INFO '%', adef || ';';
++       ELSE
++           EXECUTE adef || ';';
++       END IF;
++   END LOOP;
++
++   END LOOP;
++   RAISE NOTICE '   MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' ');
++
++   -- Issue#90: moved function creation to before the views
+
+  -- MV: Create Triggers
++
++  -- MJV FIX: #38
++  -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ;
++
++  -- Issue#61 FIX: use set_config for empty string
++  -- SET search_path = '';
++  SELECT set_config('search_path', '', false) into v_dummy;
++
+  action := 'Triggers';
+  cnt := 0;
+  FOR arec IN
+-    SELECT trigger_schema, trigger_name, event_object_table, action_order, action_condition, action_statement, action_orientation, action_timing, array_to_string(array_agg(event_manipulation::text), ' OR '),
+-           'CREATE TRIGGER ' || trigger_name || ' ' || action_timing || ' ' || array_to_string(array_agg(event_manipulation::text), ' OR ') || ' ON ' || quote_ident(dest_schema) || '.'
|| event_object_table || +- ' FOR EACH ' || action_orientation || ' ' || action_statement || ';' as TRIG_DDL +- FROM information_schema.triggers where trigger_schema = quote_ident(source_schema) GROUP BY 1,2,3,4,5,6,7,8 ++ -- 2021-03-09 MJV FIX: #40 fixed sql to get the def using pg_get_triggerdef() sql ++ SELECT n.nspname, c.relname, t.tgname, p.proname, REPLACE(pg_get_triggerdef(t.oid), quote_ident(source_schema), quote_ident(dest_schema)) || ';' AS trig_ddl ++ FROM pg_trigger t, pg_class c, pg_namespace n, pg_proc p ++ WHERE n.nspname = quote_ident(source_schema) ++ AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') ++ AND n.oid = p.pronamespace ++ AND c.oid = t.tgrelid ++ AND p.oid = t.tgfoid ++ ORDER BY c.relname, t.tgname + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.trig_ddl; + ELSE + EXECUTE arec.trig_ddl; +@@ -474,55 +2444,383 @@ + END LOOP; + RAISE NOTICE ' TRIGGERS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- --------------------- +- -- MV: Permissions: Defaults +- -- --------------------- +- action := 'PRIVS: Defaults'; ++ ++ -- MV: Create Rules ++ -- Fixes Issue#59 Implement Rules ++ action := 'Rules'; + cnt := 0; + FOR arec IN +- SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, +- CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, +- d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr +- FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) WHERE n.nspname IS NOT NULL and n.nspname = quote_ident(source_schema) ORDER BY 3, 2, 1 ++ SELECT regexp_replace(definition, E'[\\n\\r]+', ' ', 'g' ) as definition ++ FROM pg_rules ++ WHERE schemaname = quote_ident(source_schema) + LOOP +- BEGIN +- -- RAISE NOTICE 'owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ cnt := cnt + 1; ++ buffer := REPLACE(arec.definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' RULES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MV: Create Policies ++ -- Fixes Issue#66 Implement Security policies for RLS ++ action := 'Policies'; ++ cnt := 0; ++ -- #106 Handle 9.6 which doesn't have "permissive" ++ IF sq_server_version_num > 90624 THEN ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tablename) || ' AS ' || permissive || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ ELSE ++ -- handle 9.6 versions ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(tablename) || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' POLICIES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MJV Fixed #62 for comments (PASS 1) ++ action := 'Comments1'; ++ cnt := 0; ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. Also, do not include comments on foreign tables since we do not clone foreign tables at this time. ++ SELECT 'COMMENT ON ' || CASE WHEN c.relkind in ('r','p') AND a.attname IS NULL THEN 'TABLE ' WHEN c.relkind in ('r','p') AND ++ a.attname IS NOT NULL THEN 'COLUMN ' WHEN c.relkind = 'f' THEN 'FOREIGN TABLE ' WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW ' WHEN c.relkind = 'v' THEN 'VIEW ' ++ WHEN c.relkind = 'i' THEN 'INDEX ' WHEN c.relkind = 'S' THEN 'SEQUENCE ' ELSE 'XX' END || quote_ident(dest_schema) || '.' || CASE WHEN c.relkind in ('r','p') AND ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ a.attname IS NOT NULL THEN quote_ident(c.relname) || '.' 
|| a.attname ELSE quote_ident(c.relname) END || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace) ++ LEFT JOIN pg_description d ON (c.oid = d.objoid) ++ LEFT JOIN pg_attribute a ON (c.oid = a.attrelid ++ AND a.attnum > 0 and a.attnum = d.objsubid) ++ WHERE c.relkind <> 'f' AND d.description IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY ddl ++ LOOP ++ cnt := cnt + 1; ++ ++ -- BAD : "COMMENT ON SEQUENCE sample_clone2.CaseSensitive_ID_seq IS 'just a comment on CaseSensitive sequence';" ++ -- GOOD: "COMMENT ON SEQUENCE "CaseSensitive_ID_seq" IS 'just a comment on CaseSensitive sequence';" ++ ++ -- Issue#98 For MVs we create comments when we create the MVs ++ IF substring(qry,1,28) = 'COMMENT ON MATERIALIZED VIEW' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: deferring comments on MVs'; END IF; ++ cnt = cnt - 1; ++ continue; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' COMMENTS(1) cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- MJV Fixed #62 for comments (PASS 2) ++ action := 'Comments2'; ++ cnt2 := 0; ++ IF is_prokind THEN ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '.' || quote_ident(c.collname) || ' IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN p.prokind = 'f' THEN 'FUNCTION ' WHEN p.prokind = 'p' THEN 'PROCEDURE ' WHEN p.prokind = 'a' THEN 'AGGREGATE ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ ELSE -- must be v 10 or less ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- FIX Issue#87 by adding double quotes around collation name ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN proisagg THEN 'AGGREGATE ' ELSE 'FUNCTION ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' COMMENTS(2) cloned: %', LPAD(cnt2::text, 5, ' '); + +- FOREACH aclstr IN ARRAY arec.defaclacl +- LOOP +- cnt := cnt + 1; +- -- RAISE NOTICE 'aclstr=%', aclstr; +- -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner +- SELECT split_part(aclstr, '=',1) INTO grantee; +- SELECT split_part(aclstr, '=',2) INTO grantor; +- SELECT split_part(grantor, '/',1) INTO privs; +- SELECT split_part(grantor, '/',2) INTO grantor; +- -- RAISE NOTICE 'grantor=% grantee=% privs=%', grantor, grantee, privs; +- +- IF arec.atype = 'function' THEN +- -- Just having execute is enough to grant all apparently. +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; + +- ELSIF arec.atype = 'sequence' THEN +- IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN +- -- arU is enough for all privs +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN ++ -- Issue#95 bypass if No ACL specified. 
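++ -- Worked example of the aclitem parsing used below (illustrative; role names are
++ -- the sample ones from the inline comment): the default-ACL entry
++ --   mydb_update=rwU/mydb_owner
++ -- splits into grantee='mydb_update', privs='rwU', grantor='mydb_owner', where
++ -- r=SELECT, w=UPDATE, U=USAGE; for a sequence, 'rwU' covers everything, so it becomes:
++ --   ALTER DEFAULT PRIVILEGES FOR ROLE mydb_owner IN SCHEMA clone1 GRANT ALL ON SEQUENCES TO "mydb_update";
++ -- ('clone1' stands in for dest_schema.)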
++ IF NOT bNoACL THEN ++ -- --------------------- ++ -- MV: Permissions: Defaults ++ -- --------------------- ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ action := 'PRIVS: Defaults'; ++ cnt := 0; ++ FOR arec IN ++ SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, ++ CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, ++ d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr ++ FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) ++ WHERE n.nspname IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY 3, 2, 1 ++ LOOP ++ BEGIN ++ -- RAISE NOTICE ' owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ ++ FOREACH aclstr IN ARRAY arec.defaclacl ++ LOOP ++ cnt := cnt + 1; ++ -- RAISE NOTICE ' aclstr=%', aclstr; ++ -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner ++ SELECT split_part(aclstr, '=',1) INTO grantee; ++ SELECT split_part(aclstr, '=',2) INTO grantor; ++ SELECT split_part(grantor, '/',1) INTO privs; ++ SELECT split_part(grantor, '/',2) INTO grantor; ++ -- RAISE NOTICE ' grantor=% grantee=% privs=%', grantor, grantee, privs; ++ ++ IF arec.atype = 'function' THEN ++ -- Just having execute is enough to grant all apparently. ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ -- set role = cm_stage_ro_grp; ++ -- ALTER DEFAULT PRIVILEGES FOR ROLE cm_stage_ro_grp IN SCHEMA cm_stage GRANT REFERENCES, TRIGGER ON TABLES TO cm_stage_ro_grp; ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'sequence' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; + +- ELSE +- -- have to specify each priv individually ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSE ++ -- have to specify each priv individually ++ buffer2 := ''; ++ IF POSITION('r' IN privs) > 0 THEN ++ buffer2 := 'SELECT'; ++ END IF; ++ IF POSITION('w' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'UPDATE'; ++ ELSE ++ buffer2 := buffer2 || ', UPDATE'; ++ END IF; ++ END IF; ++ IF POSITION('U' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'USAGE'; ++ ELSE ++ buffer2 := buffer2 || ', USAGE'; ++ END IF; ++ END IF; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF 
bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ END IF; ++ ++ ELSIF arec.atype = 'table' THEN ++ -- do each priv individually, jeeeesh! + buffer2 := ''; ++ IF POSITION('a' IN privs) > 0 THEN ++ buffer2 := 'INSERT'; ++ END IF; + IF POSITION('r' IN privs) > 0 THEN +- buffer2 := 'SELECT'; ++ IF buffer2 = '' THEN ++ buffer2 := 'SELECT'; ++ ELSE ++ buffer2 := buffer2 || ', SELECT'; ++ END IF; + END IF; + IF POSITION('w' IN privs) > 0 THEN + IF buffer2 = '' THEN +@@ -531,181 +2829,431 @@ + buffer2 := buffer2 || ', UPDATE'; + END IF; + END IF; +- IF POSITION('U' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'USAGE'; ++ IF POSITION('d' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'DELETE'; + ELSE +- buffer2 := buffer2 || ', USAGE'; ++ buffer2 := buffer2 || ', DELETE'; + END IF; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- +- END IF; +- ELSIF arec.atype = 'table' THEN +- -- do each priv individually, jeeeesh! +- buffer2 := ''; +- IF POSITION('a' IN privs) > 0 THEN +- buffer2 := 'INSERT'; +- END IF; +- IF POSITION('r' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'SELECT'; +- ELSE +- buffer2 := buffer2 || ', SELECT'; ++ IF POSITION('t' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRIGGER'; ++ ELSE ++ buffer2 := buffer2 || ', TRIGGER'; ++ END IF; + END IF; +- END IF; +- IF POSITION('w' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'UPDATE'; +- ELSE +- buffer2 := buffer2 || ', UPDATE'; ++ IF POSITION('T' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRUNCATE'; ++ ELSE ++ buffer2 := buffer2 || ', TRUNCATE'; ++ END IF; + END IF; +- END IF; +- IF POSITION('d' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'DELETE'; +- ELSE +- buffer2 := buffer2 || ', DELETE'; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; + END IF; +- END IF; +- IF POSITION('t' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRIGGER'; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; + ELSE +- buffer2 := buffer2 || ', TRIGGER'; ++ EXECUTE buffer; + END IF; +- END IF; +- IF POSITION('T' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRUNCATE'; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'type' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF POSITION('U' IN privs) THEN ++ buffer := 'ALTER DEFAULT 
PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT USAGE ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ + ELSE +- buffer2 := buffer2 || ', TRUNCATE'; +- END IF; ++ RAISE WARNING 'Unhandled TYPE Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- + ELSE +- RAISE WARNING 'Doing nothing for type=% privs=%', arec.atype, privs; ++ RAISE WARNING 'Unhandled Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- END LOOP; +- END; +- END LOOP; ++ END LOOP; ++ END; ++ END LOOP; + +- RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: schema +- -- crunchy data extension, check_access +- -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() +- -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: schema ++ -- crunchy data extension, check_access ++ -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() ++ -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; + +- action := 'PRIVS: Schema'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl +- FROM pg_catalog.pg_namespace AS n CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) +- WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) order by r.rolname, p.perm::perm_type +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.schema_ddl; +- ELSE +- EXECUTE arec.schema_ddl; +- END IF; ++ action := 'PRIVS: Schema'; ++ cnt := 0; ++ FOR arec IN ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl ++ FROM pg_catalog.pg_namespace AS n ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) ++ WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) ++ ORDER BY r.rolname, p.perm::perm_type ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.schema_ddl; ++ ELSE ++ EXECUTE arec.schema_ddl; ++ END IF; + +- END; +- END LOOP; +- RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END; ++ END LOOP; ++ RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, 
' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: sequences +- action := 'PRIVS: Sequences'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as seq_ddl +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only OR seq_cnt = 0 THEN +- RAISE INFO '%', arec.seq_ddl; +- ELSE +- EXECUTE arec.seq_ddl; +- END IF; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: sequences ++ action := 'PRIVS: Sequences'; ++ cnt := 0; ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(t.relname::text) || ' TO "' || r.rolname || '";' as seq_ddl ++ FROM pg_catalog.pg_class AS t ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) ++ WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.seq_ddl; END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.seq_ddl; ++ ELSE ++ EXECUTE arec.seq_ddl; ++ END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- END; +- END LOOP; +- RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: functions ++ action := 'PRIVS: Functions/Procedures'; ++ cnt := 0; + +- -- MV: PRIVS: functions +- action := 'PRIVS: Functions'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' || regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') || ' TO "' || r.rolname || '";' as func_ddl +- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') +- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.func_ddl; +- ELSE +- EXECUTE arec.func_ddl; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ -- RAISE NOTICE ' source_schema=% dest_schema=%',source_schema, dest_schema; ++ FOR arec IN ++ -- 2021-03-05 MJV FIX: issue#35: caused exception in some functions with parameters and gave privileges to other users that should not have gotten them. ++ -- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' 
|| replace(regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', ''), source_schema, dest_schema) || ' TO "' || r.rolname || '";' as func_ddl ++ -- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') ++ -- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') ++ ++ -- 2021-03-05 MJV FIX: issue#37: defaults cause problems, use system function that returns args WITHOUT DEFAULTS ++ -- COALESCE(r.routine_type, 'FUNCTION'): for aggregate functions, information_schema.routines contains NULL as routine_type value. ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on rp.routine_name ++ SELECT 'GRANT ' || rp.privilege_type || ' ON ' || COALESCE(r.routine_type, 'FUNCTION') || ' ' || quote_ident(dest_schema) || '.' || quote_ident(rp.routine_name) || ' (' || pg_get_function_identity_arguments(p.oid) || ') TO ' || string_agg(distinct rp.grantee, ',') || ';' as func_dcl ++ FROM information_schema.routine_privileges rp, information_schema.routines r, pg_proc p, pg_namespace n ++ WHERE rp.routine_schema = quote_ident(source_schema) ++ AND rp.is_grantable = 'YES' ++ AND rp.routine_schema = r.routine_schema ++ AND rp.routine_name = r.routine_name ++ AND rp.routine_schema = n.nspname ++ AND n.oid = p.pronamespace ++ AND p.proname = r.routine_name ++ GROUP BY rp.privilege_type, r.routine_type, rp.routine_name, pg_get_function_identity_arguments(p.oid) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.func_dcl; ++ ELSE ++ EXECUTE arec.func_dcl; ++ END IF; ++ END; ++ END LOOP; ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: tables ++ action := 'PRIVS: Tables'; ++ -- regular, partitioned, and foreign tables plus view and materialized view permissions. Ignored for now: implement foreign table defs. ++ cnt := 0; ++ FOR arec IN ++ -- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, ++ -- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind ++ -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) ++ -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind ++ -- 2021-03-05 MJV FIX: Fixed Issue#36 for tables ++ SELECT c.relkind, 'GRANT ' || tb.privilege_type || CASE WHEN c.relkind in ('r', 'p') THEN ' ON TABLE ' WHEN c.relkind in ('v', 'm') THEN ' ON ' END || ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ -- Issue#108 FIX: enclose double-quote grantees with special characters ++ -- quote_ident(dest_schema) || '.' || quote_ident(tb.table_name) || ' TO ' || string_agg(tb.grantee, ',') || ';' as tbl_dcl ++ quote_ident(dest_schema) || '.' 
|| quote_ident(tb.table_name) || ' TO ' || string_agg('"' || tb.grantee || '"', ',') || ';' as tbl_dcl ++ FROM information_schema.table_privileges tb, pg_class c, pg_namespace n ++ WHERE tb.table_schema = quote_ident(source_schema) AND tb.table_name = c.relname AND c.relkind in ('r', 'p', 'v', 'm') ++ AND c.relnamespace = n.oid AND n.nspname = quote_ident(source_schema) ++ GROUP BY c.relkind, tb.privilege_type, tb.table_schema, tb.table_name ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.tbl_dcl; END IF; ++ -- Issue#46. Fixed reference to invalid record name (tbl_ddl --> tbl_dcl). ++ IF arec.relkind = 'f' THEN ++ RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_dcl; ++ ELSE ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.tbl_dcl; ++ ELSE ++ EXECUTE arec.tbl_dcl; ++ END IF; + END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- LOOP for regular tables and populate them if specified ++ -- Issue#75 moved from big table loop above to here. ++ IF bData THEN ++ r = clock_timestamp(); ++ -- IF bVerbose THEN RAISE NOTICE 'START: copy rows %',clock_timestamp() - t; END IF; ++ IF bVerbose THEN RAISE NOTICE 'Copying rows...'; END IF; ++ ++ EXECUTE 'SET search_path = ' || quote_ident(dest_schema) ; ++ action := 'Copy Rows'; ++ FOREACH tblelement IN ARRAY tblarray ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG1: no UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 13); ++ SELECT POSITION(' OVERRIDING SYSTEM VALUE SELECT ' IN buffer) INTO cnt2; ++ IF cnt2 = 0 THEN ++ SELECT POSITION(' SELECT ' IN buffer) INTO cnt2; ++ buffer = substring(buffer,1, cnt2); ++ ELSE ++ buffer = substring(buffer,1, cnt2); ++ END IF; ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- END; +- END LOOP; +- RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#79 implementation ++ -- Do same for tables with user-defined elements using copy to file method ++ FOREACH tblelement IN ARRAY tblarray2 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG2: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ ++ -- STATEMENT LOOKS LIKE THIS: ++ -- INSERT INTO sample11.warehouses SELECT * FROM sample.warehouses; ++ -- INSERT INTO sample11.person OVERRIDING SYSTEM VALUE SELECT * FROM sample.person; ++ -- COPY sample.address TO '/tmp/cloneschema.tmp' WITH DELIMITER AS ',';\ ++ buffer = TRIM(tblelement::text); ++ -- RAISE NOTICE 'element=%', buffer; ++ cnt1 = POSITION('INSERT INTO' IN buffer); ++ cnt2 = POSITION('COPY ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 12); ++ ELSIF cnt2 > 0 THEN ++ buffer = substring(buffer, 5); ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ ++ -- RAISE NOTICE 'buffer1=%', buffer; ++ cnt1 = POSITION(' OVERRIDING ' IN buffer); ++ cnt2 = POSITION('SELECT * FROM ' IN buffer); ++ cnt3 = POSITION(' FROM ' IN buffer); ++ cnt4 = POSITION(' TO ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 1, cnt1-2); ++ ELSIF cnt2 > 0 THEN ++ 
buffer = substring(buffer, 1, cnt2-2); ++ ELSIF cnt3 > 0 THEN ++ buffer = substring(buffer, 1, cnt3-1); ++ ELSIF cnt4 > 0 THEN ++ -- skip the COPY TO statements ++ continue; ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ -- RAISE NOTICE 'buffer2=%', buffer; ++ ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ -- RAISE NOTICE 'buffer3=%', buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- -- MV: PRIVS: tables +- action := 'PRIVS: Tables'; +- -- regular, partitioned, and foreign tables plus view and materialized view permissions. TODO: implement foreign table defs. ++ -- Issue#101 ++ -- Do same for tables with user-defined elements using direct method with text cast ++ FOREACH tblelement IN ARRAY tblarray3 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG3: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ cnt2 = POSITION(' (' IN tblelement::text); ++ IF cnt2 > 0 THEN ++ buffer = substring(tblelement, 1, cnt2); ++ buffer = substring(buffer, 6); ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END IF; ++ END LOOP; ++ ++ -- Issue#98 MVs deferred until now ++ FOREACH tblelement IN ARRAY mvarray ++ LOOP ++ s = clock_timestamp(); ++ EXECUTE tblelement; ++ -- get diagnostics for MV creates or refreshes does not work, always returns 1 ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 25); ++ cnt2 = POSITION(' AS ' IN buffer); ++ IF cnt2 > 0 THEN ++ buffer = substring(buffer, 1, cnt2); ++ SELECT RPAD(buffer, 36, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated Mat. View, % Rows Inserted: ? seconds: %', buffer, LPAD(cnt2::text, 5, ' '); END IF; ++ mvscopied := mvscopied + 1; ++ END IF; ++ END LOOP; ++ ++ cnt := cast(extract(epoch from (clock_timestamp() - r)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Copy rows duration: % seconds',cnt; END IF; ++ END IF; ++ RAISE NOTICE ' TABLES copied: %', LPAD(tblscopied::text, 5, ' '); ++ RAISE NOTICE ' MATVIEWS refreshed: %', LPAD(mvscopied::text, 5, ' '); ++ ++ ++ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys ++ -- add FK constraint ++ action := 'FK Constraints'; + cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' 
|| t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, +- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind +- LOOP +- BEGIN +- cnt := cnt + 1; +- -- RAISE NOTICE 'ddl=%', arec.tbl_ddl; +- IF arec.relkind = 'f' THEN +- RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_ddl; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', arec.tbl_ddl; +- ELSE +- EXECUTE arec.tbl_ddl; +- END IF; + +- END IF; +- END; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ FOR qry IN ++ SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(rn.relname) ++ || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' || quote_ident(source_schema) || '.', 'REFERENCES ' ++ || quote_ident(dest_schema) || '.') || ';' ++ FROM pg_constraint ct ++ JOIN pg_class rn ON rn.oid = ct.conrelid ++ -- Issue#103 needed to add this left join ++ LEFT JOIN pg_inherits i ON (rn.oid = i.inhrelid) ++ WHERE connamespace = src_oid ++ AND rn.relkind = 'r' ++ AND ct.contype = 'f' ++ -- Issue#103 fix: needed to also add this null check ++ AND i.inhrelid is null ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: adding FKEY constraint: %', qry; END IF; ++ EXECUTE qry; ++ END IF; + END LOOP; +- RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ ++ IF src_path_old = '' OR src_path_old = '""' THEN ++ -- RAISE NOTICE 'Restoring old search_path to empty string'; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ -- RAISE NOTICE 'Restoring old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path back to what it was: %', v_dummy; END IF; ++ cnt := cast(extract(epoch from (clock_timestamp() - t)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'clone_schema duration: % seconds',cnt; END IF; + + EXCEPTION + WHEN others THEN + BEGIN + GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT; +- -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1 || ' .' || v_diag2 || ' .' || v_diag3; +- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1; +- RAISE EXCEPTION 'Action: % Diagnostics: %',action, v_ret; +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. 
' || v_diag1; ++ -- Issue#101: added version to exception output ++ -- RAISE NOTICE 'v_diag1=% v_diag2=% v_diag3=% v_diag4=% v_diag5=% v_diag6=%', v_diag1, v_diag2, v_diag3, v_diag4, v_diag5, v_diag6; ++ buffer2 = ''; ++ IF action = 'Copy Rows' AND v_diag4 = '42704' THEN ++ -- Issue#105 Help user to fix the problem. ++ buffer2 = 'It appears you have a USER-DEFINED column type mismatch. Try running clone_schema with the FILECOPY option. '; ++ END IF; ++ IF lastsql <> '' THEN ++ buffer = v_ret || E'\n'|| buffer2 || E'\n'|| lastsql; ++ ELSE ++ buffer = v_ret || E'\n'|| buffer2; ++ END IF; ++ RAISE EXCEPTION 'Version: % Action: % Diagnostics: %',v_version, action, buffer; ++ ++ IF src_path_old = '' THEN ++ -- RAISE NOTICE 'setting old search_path to empty string'; ++ SELECT set_config('search_path', '', false); ++ ELSE ++ -- RAISE NOTICE 'setting old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ + RETURN; + END; + +@@ -713,14 +3261,14 @@ + END; + + $BODY$ +- LANGUAGE plpgsql VOLATILE +- COST 100; +-ALTER FUNCTION public.clone_schema(text, text, boolean, boolean) OWNER TO "{db_user}"; +-""" ++ LANGUAGE plpgsql VOLATILE COST 100; + ++ALTER FUNCTION public.clone_schema(text, text, cloneparms[]) OWNER TO "{db_user}"; ++-- REVOKE ALL PRIVILEGES ON FUNCTION clone_schema(text, text, cloneparms[]) FROM public; ++""" # noqa + +-class CloneSchema: + ++class CloneSchema: + def _create_clone_schema_function(self): + """ + Creates a postgres function `clone_schema` that copies a schema and its +@@ -752,9 +3300,8 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, true, false)' ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" + cursor.execute( +- sql, +- {'base_schema': base_schema_name, 'new_schema': new_schema_name} ++ sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} + ) + cursor.close() + +From c49b4a1c254ebe713259515a4c8373a9b19dd000 Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:06 +0100 +Subject: [PATCH 2/3] clone: allow setting up the clone mode (DATA, NODATA) + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 13 ++++++++++--- + django_tenants/models.py | 11 ++++++++++- + 2 files changed, 20 insertions(+), 4 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 3afce109..6fa52c04 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -3281,7 +3281,9 @@ def _create_clone_schema_function(self): + cursor.execute(CLONE_SCHEMA_FUNCTION.format(db_user=db_user)) + cursor.close() + +- def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): ++ def clone_schema( ++ self, base_schema_name, new_schema_name, clone_mode="DATA", set_connection=True ++ ): + """ + Creates a new schema `new_schema_name` as a clone of an existing schema + `old_schema_name`. 
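(Aside, not part of the patch: after this hunk the clone mode is chosen per call and defaults to "DATA", matching the behaviour that the pre-patch code hard-coded. A minimal usage sketch in Python, assuming the CloneSchema class from django_tenants/clone.py above; the schema names are hypothetical:

    from django_tenants.clone import CloneSchema

    # Copy only the schema structure; pass "DATA" (the default) to also
    # copy rows, as the pre-patch code always did.
    CloneSchema().clone_schema("base_tenant", "customer1", clone_mode="NODATA")

)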
+@@ -3300,8 +3302,13 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, %(clone_mode)s)" + cursor.execute( +- sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} ++ sql, ++ { ++ "base_schema": base_schema_name, ++ "new_schema": new_schema_name, ++ "clone_mode": clone_mode, ++ }, + ) + cursor.close() +diff --git a/django_tenants/models.py b/django_tenants/models.py +index 0d1812d8..655e1994 100644 +--- a/django_tenants/models.py ++++ b/django_tenants/models.py +@@ -29,6 +29,13 @@ class TenantMixin(models.Model): + to be automatically created upon save. + """ + ++ clone_mode = "DATA" ++ """ ++ One of "DATA", "NODATA". ++ When using TENANT_BASE_SCHEMA, controls whether only the database ++ structure will be copied, or if data will be copied along with it. ++ """ ++ + schema_name = models.CharField(max_length=63, unique=True, db_index=True, + validators=[_check_schema_name]) + +@@ -184,7 +191,9 @@ def create_schema(self, check_if_exists=False, sync_schema=True, + # copy tables and data from provided model schema + base_schema = get_tenant_base_schema() + clone_schema = CloneSchema() +- clone_schema.clone_schema(base_schema, self.schema_name) ++ clone_schema.clone_schema( ++ base_schema, self.schema_name, self.clone_mode ++ ) + + call_command('migrate_schemas', + tenant=True, + +From 218fbcd3bfa555b20c6fb904e5fcf307d69f18af Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:54 +0100 +Subject: [PATCH 3/3] clone: always (re-)create the clone_schema function + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 10 +++------- + 1 file changed, 3 insertions(+), 7 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 6fa52c04..63fb8e22 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -1,7 +1,6 @@ + from django.conf import settings + from django.core.exceptions import ValidationError + from django.db import connection, transaction +-from django.db.utils import ProgrammingError + + from django_tenants.utils import schema_exists + +@@ -3292,12 +3291,9 @@ def clone_schema( + connection.set_schema_to_public() + cursor = connection.cursor() + +- # check if the clone_schema function already exists in the db +- try: +- cursor.execute("SELECT 'clone_schema'::regproc") +- except ProgrammingError: +- self._create_clone_schema_function() +- transaction.commit() ++ # create or update the clone_schema function in the db ++ self._create_clone_schema_function() ++ transaction.commit() + + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") diff --git a/ilot/py3-django-tenants/APKBUILD b/ilot/py3-django-tenants/APKBUILD new file mode 100644 index 0000000..ce949ad --- /dev/null +++ b/ilot/py3-django-tenants/APKBUILD @@ -0,0 +1,43 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-django-tenants +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=django-tenants +pkgver=3.6.1 +pkgrel=5 +pkgdesc="Tenant support for Django using PostgreSQL schemas." 
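+# (note: 997_update-from-pgclone-schema.patch above updates the bundled
+# clone_schema PL/pgSQL function and adds the DATA/NODATA clone modes
+# that TenantMixin passes through)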
+url="https://pypi.python.org/project/django-tenants" +arch="noarch" +license="MIT" +depends="py3-django py3-psycopg py3-gunicorn py3-coverage" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/django-tenants/django-tenants/tar.gz/refs/tags/v$pkgver + 997_update-from-pgclone-schema.patch + " +builddir="$srcdir/$_pkgreal-$pkgver" +options="!check" # Requires setting up test database +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +b18afce81ccc89e49fcc4ebe85d90be602415ca898c1660a4e71e2bef6a3ed2e8c724e94b61d8c6f48f3fb19eb2a87d6a6f5bbf449b3e2f661f87e4b5638eafb py3-django-tenants-3.6.1.tar.gz +f2424bb188db2e3c7d13c15e5bdf0959c6f794e68dbc677c8b876d4faa321f78aded5565539f1bfd97583c6df0fcc19ec05abe203b08407e4446dd7194756825 997_update-from-pgclone-schema.patch +" diff --git a/ilot/py3-drf-orjson-renderer/APKBUILD b/ilot/py3-drf-orjson-renderer/APKBUILD new file mode 100644 index 0000000..52a7c47 --- /dev/null +++ b/ilot/py3-drf-orjson-renderer/APKBUILD @@ -0,0 +1,39 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-drf-orjson-renderer +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=drf_orjson_renderer +pkgver=1.7.3 +_gittag=9a59352f82e262bd78ccc0228361bcb321a33623 +pkgrel=0 +pkgdesc="Django RestFramework JSON Renderer Backed by orjson" +url="https://pypi.python.org/project/drf-orjson-renderer" +arch="noarch" +license="MIT" +depends="py3-django-rest-framework py3-orjson" +checkdepends="py3-pytest-django py3-numpy" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source="$pkgname-$pkgver.tar.gz::https://github.com/brianjbuck/drf_orjson_renderer/archive/$_gittag.tar.gz" +builddir="$srcdir/$_pkgreal-$_gittag" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +7870aebf6bcc249228b1620f4b50124eef54e251dcac236e23be4287284461617d630b073d2e9122f66779a908dfd69c5e16b486b23de0114b06b3df6b468e95 py3-drf-orjson-renderer-1.7.3.tar.gz +" diff --git a/ilot/py3-kadmin-rs/APKBUILD b/ilot/py3-kadmin-rs/APKBUILD deleted file mode 100644 index fae9ad1..0000000 --- a/ilot/py3-kadmin-rs/APKBUILD +++ /dev/null @@ -1,56 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-kadmin-rs -pkgver=0.5.3 -pkgrel=0 -pkgdesc="Rust and Python interfaces to the Kerberos administration interface (kadm5)" -url="https://github.com/authentik-community/kadmin-rs" -arch="all" -license="MIT" -checkdepends="py3-pytest py3-k5test" -makedepends=" - cargo - cargo-auditable - clang-libclang - py3-setuptools - py3-setuptools-rust - py3-gpep517 - py3-wheel - poetry - python3-dev - sccache - " -source="$pkgname-$pkgver.tar.gz::https://github.com/authentik-community/kadmin-rs/archive/refs/tags/kadmin/version/$pkgver.tar.gz" 
-builddir="$srcdir"/kadmin-rs-kadmin-version-$pkgver -subpackages="$pkgname-pyc" - -prepare() { - default_prepare - - cargo fetch --target="$CTARGET" --locked -} - - -build() { - cargo auditable build --release --locked - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - cargo test --locked - - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m unittest python/tests/test_*.py -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -61d3ddfe619827cef83af944b2281f2cf6966d95c3d4a5883b82169bf1f34e6b7173cfa086198e3e0f9a227590a497dcb1c9b209cd4d0c6d361fdfce9b98eec0 py3-kadmin-rs-0.5.3.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-abstractions/APKBUILD b/ilot/py3-microsoft-kiota-abstractions/APKBUILD index 24d7e1e..b298650 100644 --- a/ilot/py3-microsoft-kiota-abstractions/APKBUILD +++ b/ilot/py3-microsoft-kiota-abstractions/APKBUILD @@ -3,23 +3,21 @@ pkgname=py3-microsoft-kiota-abstractions #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-abstractions -pkgver=1.6.8 +pkgver=1.3.3 pkgrel=0 pkgdesc="Abstractions library for Kiota generated Python clients" url="https://pypi.python.org/project/microsoft-kiota-abstractions" arch="noarch" license="MIT" depends=" - py3-std-uritemplate<2.0.0 + py3-std-uritemplate py3-opentelemetry-sdk py3-importlib-metadata " -checkdepends="py3-pytest py3-pytest-asyncio" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-abstractions-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-abstractions-v$pkgver/packages/abstractions" +checkdepends="py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-abstractions-python/archive/refs/tags/v$pkgver.tar.gz" +builddir="$srcdir/kiota-abstractions-python-$pkgver" subpackages="$pkgname-pyc" build() { @@ -40,5 +38,5 @@ package() { } sha512sums=" -55341b1ff3fb1a516ceb84817db991d6e6aa83b01326f64cf21690dee1ab84e9c9c4f7162f9f71ec1261b4e0380b73b13284128bd786b80da29faf968720b355 py3-microsoft-kiota-abstractions-1.6.8.tar.gz +b416b14cc68dab4eb99d8abc2378c8691781c984f453c7328eefe5bc10788d8244bdc0e3c98d4c2cdbad60d5f672893da4eeed99037d4e361849bcef458547e1 py3-microsoft-kiota-abstractions-1.3.3.tar.gz " diff --git a/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD b/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD index c84acdc..2149918 100644 --- a/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD +++ b/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-authentication-azure #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-authentication-azure -pkgver=1.6.8 +pkgver=1.1.0 pkgrel=0 pkgdesc="Authentication provider for Kiota using Azure Identity" url="https://pypi.python.org/project/microsoft-kiota-authentication-azure" @@ -15,12 +15,10 @@ depends=" py3-importlib-metadata " checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-authentication-azure-v$pkgver.tar.gz - " +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" options="!check" # TODO 
-builddir="$srcdir/kiota-python-microsoft-kiota-authentication-azure-v$pkgver/packages/authentication/azure" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-authentication-azure-python/archive/refs/tags/v$pkgver.tar.gz" +builddir="$srcdir/kiota-authentication-azure-python-$pkgver" subpackages="$pkgname-pyc" build() { @@ -41,5 +39,5 @@ package() { } sha512sums=" -d661d379f036b45bf356e349e28d3478f4a10b351dfde2d1b11a429c0f2160cde9696990cc18d72a224cfd3cc4c90bdc2e6f07d9e4763bd126cd9f66a09b9bec py3-microsoft-kiota-authentication-azure-1.6.8.tar.gz +4a58a49c027951dd856bc24b03c6ba44b448949bcd3210237d2574e3ceec32eefb403057720e4d517027494d6f977874dd48abbfb5cf856399eb5d1c895376fc py3-microsoft-kiota-authentication-azure-1.1.0.tar.gz " diff --git a/ilot/py3-microsoft-kiota-http/APKBUILD b/ilot/py3-microsoft-kiota-http/APKBUILD index bebb592..e1ded6f 100644 --- a/ilot/py3-microsoft-kiota-http/APKBUILD +++ b/ilot/py3-microsoft-kiota-http/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-http #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-http -pkgver=1.6.8 +pkgver=1.3.3 pkgrel=0 pkgdesc="Kiota http request adapter implementation for httpx library" url="https://pypi.python.org/project/microsoft-kiota-http" @@ -14,12 +14,10 @@ depends=" py3-httpx " checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-http-v$pkgver.tar.gz - " +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-http-python/archive/refs/tags/v$pkgver.tar.gz" options="!check" # TODO -builddir="$srcdir/kiota-python-microsoft-kiota-http-v$pkgver/packages/http/httpx" +builddir="$srcdir/kiota-http-python-$pkgver" subpackages="$pkgname-pyc" build() { @@ -40,5 +38,5 @@ package() { } sha512sums=" -c453c89d31cc062f2d8be4a28bda0666dbde6b5a8e42855892cda72e5d104e6bb5516db01d9feb7f619b8fa77237c9e3badd24b29326f627f95b69210835321d py3-microsoft-kiota-http-1.6.8.tar.gz +fff2dc37a0e379ad5689ff9532b43e6ee62ca97589b2feed39898c17a45c5cdb17c20bd714c46cd6ae6e2522de695b6c747aaf5fb0ef96dfd504cd37a6169a87 py3-microsoft-kiota-http-1.3.3.tar.gz " diff --git a/ilot/py3-microsoft-kiota-serialization-form/APKBUILD b/ilot/py3-microsoft-kiota-serialization-form/APKBUILD index fccfd62..de1939f 100644 --- a/ilot/py3-microsoft-kiota-serialization-form/APKBUILD +++ b/ilot/py3-microsoft-kiota-serialization-form/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-serialization-form #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-serialization-form -pkgver=1.6.8 +pkgver=0.1.1 pkgrel=0 pkgdesc="Kiota Form encoded serialization implementation for Python" url="https://pypi.python.org/project/microsoft-kiota-serialization-form" @@ -14,11 +14,9 @@ depends=" py3-pendulum " checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-form-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-form-v$pkgver/packages/serialization/form" +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-serialization-form-python/archive/refs/tags/v$pkgver.tar.gz" +builddir="$srcdir/kiota-serialization-form-python-$pkgver" subpackages="$pkgname-pyc" 
build() { @@ -39,5 +37,5 @@ package() { } sha512sums=" -0e4fabe18980612ca3f55fd7350148d2393da3f35dc79cd4fe56b01f50bc2af147bde5e294580d83b97b4a549d77e6581ece8ddb19ea09ee92fd6cbfead0d3db py3-microsoft-kiota-serialization-form-1.6.8.tar.gz +0afb2b3f0f7d325e630b8a11d17a98b2c42446cb803384e36406074c62ade2be994e29b9d7cb098d9de55609dda28c339eed6397ec373375caaf158b139a5449 py3-microsoft-kiota-serialization-form-0.1.1.tar.gz " diff --git a/ilot/py3-microsoft-kiota-serialization-json/APKBUILD b/ilot/py3-microsoft-kiota-serialization-json/APKBUILD index f59d827..8efe8fa 100644 --- a/ilot/py3-microsoft-kiota-serialization-json/APKBUILD +++ b/ilot/py3-microsoft-kiota-serialization-json/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-serialization-json #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-serialization-json -pkgver=1.6.8 +pkgver=1.3.2 pkgrel=0 pkgdesc="JSON serialization implementation for Kiota clients in Python" url="https://pypi.python.org/project/microsoft-kiota-serialization-json" @@ -14,12 +14,10 @@ depends=" py3-pendulum " checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-json-v$pkgver.tar.gz - " +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-serialization-json-python/archive/refs/tags/v$pkgver.tar.gz" options="!check" # TODO -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-json-v$pkgver/packages/serialization/json" +builddir="$srcdir/kiota-serialization-json-python-$pkgver" subpackages="$pkgname-pyc" build() { @@ -40,5 +38,5 @@ package() { } sha512sums=" -42b8e1d2bfb175e52876314a598647de7b70acb8140cefbfb20d0f8de241bbb03a1cfe6c7108a56047f2a8e3f8f781a23fe54d5612d68a5966340279ff0eb8bc py3-microsoft-kiota-serialization-json-1.6.8.tar.gz +bdf2a42d4509b7a6f093295c8f5d661e771d040965ebdd7fb7772503482fbc6d449c5ac7b16f5f497d9005018d463d3a68650b4b4da0f1a7fbcb0ad3377d12b5 py3-microsoft-kiota-serialization-json-1.3.2.tar.gz " diff --git a/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD b/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD index c0da7ff..bf088b7 100644 --- a/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD +++ b/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-serialization-multipart #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-serialization-multipart -pkgver=1.6.8 +pkgver=0.1.0 pkgrel=0 pkgdesc="Multipart serialization implementation for python based kiota clients" url="https://pypi.python.org/project/microsoft-kiota-serialization-multipart" @@ -11,11 +11,9 @@ arch="noarch" license="MIT" depends="py3-microsoft-kiota-abstractions py3-microsoft-kiota-serialization-json" checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-multipart-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-multipart-v$pkgver/packages/serialization/multipart" +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-serialization-multipart-python/archive/refs/tags/v$pkgver.tar.gz" +builddir="$srcdir/kiota-serialization-multipart-python-$pkgver" 
subpackages="$pkgname-pyc" build() { @@ -36,5 +34,5 @@ package() { } sha512sums=" -d6d6d36fe55f4aa595d380e43f93f3de7674633edba676aec16fc26254a12e4f700427fedf1bedfddde30a7f708c93ccbbe586bb0e6950748a2debe609bf44c1 py3-microsoft-kiota-serialization-multipart-1.6.8.tar.gz +a402f4fc891a70789c8ac6cb16ae30f2059e6aed4013c7601a33f37b959446067cbf0abc630f15aadeb4c85eb04703cead3c19fbbff628332efdebce3c4badb8 py3-microsoft-kiota-serialization-multipart-0.1.0.tar.gz " diff --git a/ilot/py3-microsoft-kiota-serialization-text/APKBUILD b/ilot/py3-microsoft-kiota-serialization-text/APKBUILD index 3c38b26..7684d45 100644 --- a/ilot/py3-microsoft-kiota-serialization-text/APKBUILD +++ b/ilot/py3-microsoft-kiota-serialization-text/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-microsoft-kiota-serialization-text #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=microsoft-kiota-serialization-text -pkgver=1.6.8 +pkgver=1.0.0 pkgrel=0 pkgdesc="Text serialization implementation for Kiota generated clients in Python" url="https://pypi.python.org/project/microsoft-kiota-abstractions" @@ -14,11 +14,9 @@ depends=" py3-dateutil " checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-text-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-text-v$pkgver/packages/serialization/text" +makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" +source="$pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-serialization-text-python/archive/refs/tags/v$pkgver.tar.gz" +builddir="$srcdir/kiota-serialization-text-python-$pkgver" subpackages="$pkgname-pyc" build() { @@ -39,5 +37,5 @@ package() { } sha512sums=" -55dbc87253819f496e2f25de2bf24b170761f335117da414bb35c6db9008e9ca8c6fd13d5e429914c322a850a57858d9abdee7dc209ad55e469182995290d568 py3-microsoft-kiota-serialization-text-1.6.8.tar.gz +b3b0d0a7e69c70c14ed606f70179a49107f6df6f2ba577e9bacbdb15b3071062a180d2e6b77a43d82fe7a67264ad24aa685c71695042ffd54ea4406f9b990208 py3-microsoft-kiota-serialization-text-1.0.0.tar.gz " diff --git a/ilot/py3-msal/APKBUILD b/ilot/py3-msal/APKBUILD index 02b267a..826a32a 100644 --- a/ilot/py3-msal/APKBUILD +++ b/ilot/py3-msal/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-msal #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=msal -pkgver=1.31.1 +pkgver=1.31.0 pkgrel=0 pkgdesc="Microsoft Authentication Library (MSAL) for Python" url="https://pypi.org/project/msal" @@ -39,5 +39,5 @@ package() { } sha512sums=" -f75541337f09ba29d4de13206346ad7793b3f2bdbdbf8fcb050ee7976b397ca666d61aee21121a4efdd7c150c9d2f87f75812e7b8aa96a5f8ac5219e7a946af2 py3-msal-1.31.1.tar.gz +712342167c7cc958c16c45d9c21a58d83efd9ff3dccf4494d7c83fb226678ed944fef1751a4002fcb292450884c682f1b5d00cdca248d1def54d6f884cdb5dc2 py3-msal-1.31.0.tar.gz " diff --git a/ilot/py3-msgraph-core/APKBUILD b/ilot/py3-msgraph-core/APKBUILD index e8d9cb5..affc2a5 100644 --- a/ilot/py3-msgraph-core/APKBUILD +++ b/ilot/py3-msgraph-core/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-msgraph-core #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=msgraph-core -pkgver=1.1.8 +pkgver=1.1.3 pkgrel=0 pkgdesc="The Microsoft Graph Python SDK" url="https://pypi.python.org/project/msgraph-core" @@ -39,5 +39,5 @@ package() { } sha512sums=" -0cae6f76cb1373d1ef76448e47b9951e5076a144140c19edc14186f7bfd92930e50c9f6c459170e3362ef267903cdf261d1897566983a7302beab205f9d61389 py3-msgraph-core-1.1.8.tar.gz 
+48b47b5b02062fe05f9f917d1c6461f539f9ff6dfbafb4a2dcfbe91237725eb7427b2673aec7eb994f733fab109879d96e06e122d72ecab69ff77a1f76fafe49 py3-msgraph-core-1.1.3.tar.gz " diff --git a/ilot/py3-msgraph-sdk/APKBUILD b/ilot/py3-msgraph-sdk/APKBUILD index f23f733..d59a1e7 100644 --- a/ilot/py3-msgraph-sdk/APKBUILD +++ b/ilot/py3-msgraph-sdk/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-msgraph-sdk #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=msgraph-sdk -pkgver=1.16.0 +pkgver=1.8.0 pkgrel=0 pkgdesc="The Microsoft Graph Python SDK" url="https://pypi.python.org/project/msgraph-sdk" @@ -40,5 +40,5 @@ package() { } sha512sums=" -af930e5e470f6ac78724650885f70cf447482a53f90043d326b3e00dc7572fd0d476658ebb1677118010e38b54f1e4e609dcfb5fcef5664f05b25062786d11af py3-msgraph-sdk-1.16.0.tar.gz +e7d93a4b0f29023dcce0529b54a397b568f44ff40b1efe52e1c060b4552dd055e6a62e0ebcb72cbf3c1babe00440c41e6f897e86a01c3e261a8b88b23cd3af2c py3-msgraph-sdk-1.8.0.tar.gz " diff --git a/ilot/py3-opentelemetry-sdk/APKBUILD b/ilot/py3-opentelemetry-sdk/APKBUILD index 08bc2ad..14a474c 100644 --- a/ilot/py3-opentelemetry-sdk/APKBUILD +++ b/ilot/py3-opentelemetry-sdk/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-opentelemetry-sdk #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=opentelemetry-sdk -pkgver=1.29.0 +pkgver=1.27.0 pkgrel=0 pkgdesc="OpenTelemetry Python SDK" url="https://github.com/open-telemetry/opentelemetry-python/tree/main" @@ -71,5 +71,5 @@ proto() { } sha512sums=" -92c90e6a684d8cfab3bba4d72612ccf53ae54cdd9784e3434b25adc3730fe114f21fd7aa21da80edf6e0e7c80b39c64ee31fb16f68b04809289bbf5d49d4ca2e py3-opentelemetry-sdk-1.29.0.tar.gz +d8b5a617c7e804b4e6e1b508395e87481a3dcc3b375573110750830a1cf6037cfeb5c09dba3e7cfa472e385dbf619afedd79b1c31c5bfe4e87d44ea65f4d2f0b py3-opentelemetry-sdk-1.27.0.tar.gz " diff --git a/ilot/py3-pyrad/APKBUILD b/ilot/py3-pyrad/APKBUILD new file mode 100644 index 0000000..21a2d72 --- /dev/null +++ b/ilot/py3-pyrad/APKBUILD @@ -0,0 +1,39 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-pyrad +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=pyrad +pkgver=2.4 +pkgrel=0 +pkgdesc="Python RADIUS Implementation" +url="https://pypi.python.org/project/pyrad" +arch="noarch" +license="BSD-3-Clause" +depends="py3-netaddr" +checkdepends="py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel poetry" +source="$pkgname-$pkgver.tar.gz::https://github.com/pyradius/pyrad/archive/refs/tags/$pkgver.tar.gz" +options="!check" # TODO +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +e4f4c687596bd226cf2cdb409a8d940c7b665fb7f722d09113dd9a1b05ab176ce8f920b235918ec01695f262930d13b4057b199cf6aac72afa54950c1fb59166 py3-pyrad-2.4.tar.gz +" diff --git a/ilot/py3-std-uritemplate/APKBUILD b/ilot/py3-std-uritemplate/APKBUILD index caca02f..9294abc 100644 --- a/ilot/py3-std-uritemplate/APKBUILD +++ b/ilot/py3-std-uritemplate/APKBUILD @@ -3,7 +3,7 @@ pkgname=py3-std-uritemplate #_pkgreal is used by apkbuild-pypi to find modules at PyPI _pkgreal=std-uritemplate -pkgver=2.0.1 +pkgver=1.0.6 pkgrel=0 pkgdesc="A complete and maintained cross-language implementation of the Uri Template 
specification RFC 6570 Level 4" url="https://pypi.python.org/project/std-uritemplate" @@ -37,5 +37,5 @@ package() { } sha512sums=" -e073a1204d65bb639cc93480b0f68e1edfe5ac3cff607b72c8da8916b7660eea2b2b246b5db02979cd5c856087958c84dc3bc5e9d76a9540f2ac2a7da8cd18df py3-std-uritemplate-2.0.1.tar.gz +4873ce356170aea1b45479d5ded0b596265782c097d3fd9d1bb4cc8ad902067bab654057173a2e2b1da37e5ac36ebee024feca43b0e4298b103dc979f97e7c1c py3-std-uritemplate-1.0.6.tar.gz " diff --git a/ilot/py3-tenant-schemas-celery/APKBUILD b/ilot/py3-tenant-schemas-celery/APKBUILD new file mode 100644 index 0000000..696c391 --- /dev/null +++ b/ilot/py3-tenant-schemas-celery/APKBUILD @@ -0,0 +1,41 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-tenant-schemas-celery +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=tenant-schemas-celery +pkgver=2.2.0 +pkgrel=2 +pkgdesc="Celery integration for django-tenant-schemas and django-tenants" +url="https://pypi.python.org/project/tenant-schemas-celery" +arch="noarch" +license="MIT" +depends="py3-django-tenants py3-celery" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/maciej-gol/tenant-schemas-celery/tar.gz/refs/tags/$pkgver + " +options="!check" # Test suite wants docker +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +dad71011306936dc84d966797b113008780750e9e973513092bec892be0d1468e0a0e7e8e2fcca9765309a27767e1c72bdaad7c8aca16353ae1eef783c239148 py3-tenant-schemas-celery-2.2.0.tar.gz +" diff --git a/ilot/uptime-kuma/APKBUILD b/ilot/uptime-kuma/APKBUILD index 6bc88c8..f8c48e2 100644 --- a/ilot/uptime-kuma/APKBUILD +++ b/ilot/uptime-kuma/APKBUILD @@ -1,7 +1,7 @@ # Contributor: Antoine Martin (ayakael) # Maintainer: Antoine Martin (ayakael) pkgname=uptime-kuma -pkgver=1.23.16 +pkgver=1.23.15 pkgrel=0 pkgdesc='A fancy self-hosted monitoring tool' arch="all" @@ -43,7 +43,7 @@ package() { mv "$pkgdir"/usr/share/webapps/uptime-kuma/LICENSE "$pkgdir"/usr/share/licenses/uptime-kuma/. 
} sha512sums=" -a132d1cd796fbd868782627edfd45d2a6bd3d2fadece23e0bbf000e6a30482659062a43c4590c98e390cac9b8c1926efd8ff01c5b358b7ccea4438259b86f24e uptime-kuma-1.23.16.tar.gz +eb2210ac27a375e4e7116282436cc98e8c24c9f290e20af69dcb8069bdeca79457e9eb3971982c552b856dc22643a8c9f551723aade18d28cbf93881e3e1b182 uptime-kuma-1.23.15.tar.gz 0ceddb98a6f318029b8bd8b5a49b55c883e77a5f8fffe2b9b271c9abf0ac52dc7a6ea4dbb4a881124a7857f1e43040f18755c1c2a034479e6a94d2b65a73d847 uptime-kuma.openrc 1dbae536b23e3624e139155abbff383bba3209ff2219983da2616b4376b1a5041df812d1e5164716fc6e967a8446d94baae3b96ee575d400813cc6fdc2cc274e uptime-kuma.conf " diff --git a/ilot/uvicorn/2540_add-websocketssansioprotocol.patch b/ilot/uvicorn/2540_add-websocketssansioprotocol.patch deleted file mode 100644 index 0cb8db4..0000000 --- a/ilot/uvicorn/2540_add-websocketssansioprotocol.patch +++ /dev/null @@ -1,618 +0,0 @@ -diff --git a/docs/deployment.md b/docs/deployment.md -index d69fcf8..99dfbf3 100644 ---- a/docs/deployment.md -+++ b/docs/deployment.md -@@ -60,7 +60,7 @@ Options: - --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] - --http [auto|h11|httptools] HTTP protocol implementation. [default: - auto] -- --ws [auto|none|websockets|wsproto] -+ --ws [auto|none|websockets|websockets-sansio|wsproto] - WebSocket protocol implementation. - [default: auto] - --ws-max-size INTEGER WebSocket max size message in bytes -diff --git a/docs/index.md b/docs/index.md -index bb6fc32..50e2ab9 100644 ---- a/docs/index.md -+++ b/docs/index.md -@@ -130,7 +130,7 @@ Options: - --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] - --http [auto|h11|httptools] HTTP protocol implementation. [default: - auto] -- --ws [auto|none|websockets|wsproto] -+ --ws [auto|none|websockets|websockets-sansio|wsproto] - WebSocket protocol implementation. 
- [default: auto] - --ws-max-size INTEGER WebSocket max size message in bytes -diff --git a/pyproject.toml b/pyproject.toml -index 0a89966..8771bfb 100644 ---- a/pyproject.toml -+++ b/pyproject.toml -@@ -92,6 +92,10 @@ filterwarnings = [ - "ignore:Uvicorn's native WSGI implementation is deprecated.*:DeprecationWarning", - "ignore: 'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning", - "ignore: remove second argument of ws_handler:DeprecationWarning:websockets", -+ "ignore: websockets.legacy is deprecated.*:DeprecationWarning", -+ "ignore: websockets.server.WebSocketServerProtocol is deprecated.*:DeprecationWarning", -+ "ignore: websockets.client.connect is deprecated.*:DeprecationWarning", -+ "ignore: websockets.exceptions.InvalidStatusCode is deprecated", - ] - - [tool.coverage.run] -diff --git a/tests/conftest.py b/tests/conftest.py -index 1b0c0e8..7061a14 100644 ---- a/tests/conftest.py -+++ b/tests/conftest.py -@@ -233,9 +233,9 @@ def unused_tcp_port() -> int: - marks=pytest.mark.skipif(not importlib.util.find_spec("wsproto"), reason="wsproto not installed."), - id="wsproto", - ), -+ pytest.param("uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", id="websockets"), - pytest.param( -- "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", -- id="websockets", -+ "uvicorn.protocols.websockets.websockets_sansio_impl:WebSocketsSansIOProtocol", id="websockets-sansio" - ), - ] - ) -diff --git a/tests/middleware/test_logging.py b/tests/middleware/test_logging.py -index f27633a..63d7daf 100644 ---- a/tests/middleware/test_logging.py -+++ b/tests/middleware/test_logging.py -@@ -49,7 +49,9 @@ async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable - await send({"type": "http.response.body", "body": b"", "more_body": False}) - - --async def test_trace_logging(caplog: pytest.LogCaptureFixture, logging_config, unused_tcp_port: int): -+async def test_trace_logging( -+ caplog: pytest.LogCaptureFixture, logging_config: dict[str, typing.Any], unused_tcp_port: int -+): - config = Config( - app=app, - log_level="trace", -@@ -91,8 +93,8 @@ async def test_trace_logging_on_http_protocol(http_protocol_cls, caplog, logging - - async def test_trace_logging_on_ws_protocol( - ws_protocol_cls: WSProtocol, -- caplog, -- logging_config, -+ caplog: pytest.LogCaptureFixture, -+ logging_config: dict[str, typing.Any], - unused_tcp_port: int, - ): - async def websocket_app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): -@@ -104,7 +106,7 @@ async def test_trace_logging_on_ws_protocol( - elif message["type"] == "websocket.disconnect": - break - -- async def open_connection(url): -+ async def open_connection(url: str): - async with websockets.client.connect(url) as websocket: - return websocket.open - -diff --git a/tests/middleware/test_proxy_headers.py b/tests/middleware/test_proxy_headers.py -index 0ade974..d300c45 100644 ---- a/tests/middleware/test_proxy_headers.py -+++ b/tests/middleware/test_proxy_headers.py -@@ -465,6 +465,7 @@ async def test_proxy_headers_websocket_x_forwarded_proto( - host, port = scope["client"] - await send({"type": "websocket.accept"}) - await send({"type": "websocket.send", "text": f"{scheme}://{host}:{port}"}) -+ await send({"type": "websocket.close"}) - - app_with_middleware = ProxyHeadersMiddleware(websocket_app, trusted_hosts="*") - config = Config( -diff --git a/tests/protocols/test_websocket.py b/tests/protocols/test_websocket.py -index 15ccfdd..e728544 100644 ---- 
a/tests/protocols/test_websocket.py -+++ b/tests/protocols/test_websocket.py -@@ -7,6 +7,8 @@ from copy import deepcopy - import httpx - import pytest - import websockets -+import websockets.asyncio -+import websockets.asyncio.client - import websockets.client - import websockets.exceptions - from typing_extensions import TypedDict -@@ -601,12 +603,9 @@ async def test_connection_lost_before_handshake_complete( - await send_accept_task.wait() - disconnect_message = await receive() # type: ignore - -- response: httpx.Response | None = None -- - async def websocket_session(uri: str): -- nonlocal response - async with httpx.AsyncClient() as client: -- response = await client.get( -+ await client.get( - f"http://127.0.0.1:{unused_tcp_port}", - headers={ - "upgrade": "websocket", -@@ -623,9 +622,6 @@ async def test_connection_lost_before_handshake_complete( - send_accept_task.set() - await asyncio.sleep(0.1) - -- assert response is not None -- assert response.status_code == 500, response.text -- assert response.text == "Internal Server Error" - assert disconnect_message == {"type": "websocket.disconnect", "code": 1006} - await task - -@@ -920,6 +916,9 @@ async def test_server_reject_connection_with_body_nolength( - async def test_server_reject_connection_with_invalid_msg( - ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int - ): -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") -+ - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - assert scope["type"] == "websocket" - assert "extensions" in scope and "websocket.http.response" in scope["extensions"] -@@ -951,6 +950,9 @@ async def test_server_reject_connection_with_invalid_msg( - async def test_server_reject_connection_with_missing_body( - ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int - ): -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") -+ - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - assert scope["type"] == "websocket" - assert "extensions" in scope and "websocket.http.response" in scope["extensions"] -@@ -986,6 +988,8 @@ async def test_server_multiple_websocket_http_response_start_events( - The server should raise an exception if it sends multiple - websocket.http.response.start events. 
- """ -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") - exception_message: str | None = None - - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): -diff --git a/uvicorn/config.py b/uvicorn/config.py -index 664d191..cbfeea6 100644 ---- a/uvicorn/config.py -+++ b/uvicorn/config.py -@@ -25,7 +25,7 @@ from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware - from uvicorn.middleware.wsgi import WSGIMiddleware - - HTTPProtocolType = Literal["auto", "h11", "httptools"] --WSProtocolType = Literal["auto", "none", "websockets", "wsproto"] -+WSProtocolType = Literal["auto", "none", "websockets", "websockets-sansio", "wsproto"] - LifespanType = Literal["auto", "on", "off"] - LoopSetupType = Literal["none", "auto", "asyncio", "uvloop"] - InterfaceType = Literal["auto", "asgi3", "asgi2", "wsgi"] -@@ -47,6 +47,7 @@ WS_PROTOCOLS: dict[WSProtocolType, str | None] = { - "auto": "uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol", - "none": None, - "websockets": "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", -+ "websockets-sansio": "uvicorn.protocols.websockets.websockets_sansio_impl:WebSocketsSansIOProtocol", - "wsproto": "uvicorn.protocols.websockets.wsproto_impl:WSProtocol", - } - LIFESPAN: dict[LifespanType, str] = { -diff --git a/uvicorn/protocols/websockets/websockets_sansio_impl.py b/uvicorn/protocols/websockets/websockets_sansio_impl.py -new file mode 100644 -index 0000000..994af07 ---- /dev/null -+++ b/uvicorn/protocols/websockets/websockets_sansio_impl.py -@@ -0,0 +1,405 @@ -+from __future__ import annotations -+ -+import asyncio -+import logging -+from asyncio.transports import BaseTransport, Transport -+from http import HTTPStatus -+from typing import Any, Literal, cast -+from urllib.parse import unquote -+ -+from websockets import InvalidState -+from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory -+from websockets.frames import Frame, Opcode -+from websockets.http11 import Request -+from websockets.server import ServerProtocol -+ -+from uvicorn._types import ( -+ ASGIReceiveEvent, -+ ASGISendEvent, -+ WebSocketAcceptEvent, -+ WebSocketCloseEvent, -+ WebSocketDisconnectEvent, -+ WebSocketReceiveEvent, -+ WebSocketResponseBodyEvent, -+ WebSocketResponseStartEvent, -+ WebSocketScope, -+ WebSocketSendEvent, -+) -+from uvicorn.config import Config -+from uvicorn.logging import TRACE_LOG_LEVEL -+from uvicorn.protocols.utils import ( -+ ClientDisconnected, -+ get_local_addr, -+ get_path_with_query_string, -+ get_remote_addr, -+ is_ssl, -+) -+from uvicorn.server import ServerState -+ -+ -+class WebSocketsSansIOProtocol(asyncio.Protocol): -+ def __init__( -+ self, -+ config: Config, -+ server_state: ServerState, -+ app_state: dict[str, Any], -+ _loop: asyncio.AbstractEventLoop | None = None, -+ ) -> None: -+ if not config.loaded: -+ config.load() # pragma: no cover -+ -+ self.config = config -+ self.app = config.loaded_app -+ self.loop = _loop or asyncio.get_event_loop() -+ self.logger = logging.getLogger("uvicorn.error") -+ self.root_path = config.root_path -+ self.app_state = app_state -+ -+ # Shared server state -+ self.connections = server_state.connections -+ self.tasks = server_state.tasks -+ self.default_headers = server_state.default_headers -+ -+ # Connection state -+ self.transport: asyncio.Transport = None # type: ignore[assignment] -+ self.server: tuple[str, int] | None = None -+ 
self.client: tuple[str, int] | None = None -+ self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] -+ -+ # WebSocket state -+ self.queue: asyncio.Queue[ASGIReceiveEvent] = asyncio.Queue() -+ self.handshake_initiated = False -+ self.handshake_complete = False -+ self.close_sent = False -+ self.initial_response: tuple[int, list[tuple[str, str]], bytes] | None = None -+ -+ extensions = [] -+ if self.config.ws_per_message_deflate: -+ extensions = [ServerPerMessageDeflateFactory()] -+ self.conn = ServerProtocol( -+ extensions=extensions, -+ max_size=self.config.ws_max_size, -+ logger=logging.getLogger("uvicorn.error"), -+ ) -+ -+ self.read_paused = False -+ self.writable = asyncio.Event() -+ self.writable.set() -+ -+ # Buffers -+ self.bytes = b"" -+ -+ def connection_made(self, transport: BaseTransport) -> None: -+ """Called when a connection is made.""" -+ transport = cast(Transport, transport) -+ self.connections.add(self) -+ self.transport = transport -+ self.server = get_local_addr(transport) -+ self.client = get_remote_addr(transport) -+ self.scheme = "wss" if is_ssl(transport) else "ws" -+ -+ if self.logger.level <= TRACE_LOG_LEVEL: -+ prefix = "%s:%d - " % self.client if self.client else "" -+ self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) -+ -+ def connection_lost(self, exc: Exception | None) -> None: -+ code = 1005 if self.handshake_complete else 1006 -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) -+ self.connections.remove(self) -+ -+ if self.logger.level <= TRACE_LOG_LEVEL: -+ prefix = "%s:%d - " % self.client if self.client else "" -+ self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) -+ -+ self.handshake_complete = True -+ if exc is None: -+ self.transport.close() -+ -+ def eof_received(self) -> None: -+ pass -+ -+ def shutdown(self) -> None: -+ if self.handshake_complete: -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -+ self.conn.send_close(1012) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ else: -+ self.send_500_response() -+ self.transport.close() -+ -+ def data_received(self, data: bytes) -> None: -+ self.conn.receive_data(data) -+ parser_exc = self.conn.parser_exc -+ if parser_exc is not None: -+ self.handle_parser_exception() -+ return -+ self.handle_events() -+ -+ def handle_events(self) -> None: -+ for event in self.conn.events_received(): -+ if isinstance(event, Request): -+ self.handle_connect(event) -+ if isinstance(event, Frame): -+ if event.opcode == Opcode.CONT: -+ self.handle_cont(event) -+ elif event.opcode == Opcode.TEXT: -+ self.handle_text(event) -+ elif event.opcode == Opcode.BINARY: -+ self.handle_bytes(event) -+ elif event.opcode == Opcode.PING: -+ self.handle_ping(event) -+ elif event.opcode == Opcode.CLOSE: -+ self.handle_close(event) -+ -+ # Event handlers -+ -+ def handle_connect(self, event: Request) -> None: -+ self.request = event -+ self.response = self.conn.accept(event) -+ self.handshake_initiated = True -+ if self.response.status_code != 101: -+ self.handshake_complete = True -+ self.close_sent = True -+ self.conn.send_response(self.response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ return -+ -+ headers = [ -+ (key.encode("ascii"), value.encode("ascii", errors="surrogateescape")) -+ for key, value in event.headers.raw_items() -+ ] -+ raw_path, _, query_string = event.path.partition("?") -+ self.scope: WebSocketScope = { -+ "type": 
"websocket", -+ "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, -+ "http_version": "1.1", -+ "scheme": self.scheme, -+ "server": self.server, -+ "client": self.client, -+ "root_path": self.root_path, -+ "path": unquote(raw_path), -+ "raw_path": raw_path.encode("ascii"), -+ "query_string": query_string.encode("ascii"), -+ "headers": headers, -+ "subprotocols": event.headers.get_all("Sec-WebSocket-Protocol"), -+ "state": self.app_state.copy(), -+ "extensions": {"websocket.http.response": {}}, -+ } -+ self.queue.put_nowait({"type": "websocket.connect"}) -+ task = self.loop.create_task(self.run_asgi()) -+ task.add_done_callback(self.on_task_complete) -+ self.tasks.add(task) -+ -+ def handle_cont(self, event: Frame) -> None: -+ self.bytes += event.data -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def handle_text(self, event: Frame) -> None: -+ self.bytes = event.data -+ self.curr_msg_data_type: Literal["text", "bytes"] = "text" -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def handle_bytes(self, event: Frame) -> None: -+ self.bytes = event.data -+ self.curr_msg_data_type = "bytes" -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def send_receive_event_to_app(self) -> None: -+ data_type = self.curr_msg_data_type -+ msg: WebSocketReceiveEvent -+ if data_type == "text": -+ msg = {"type": "websocket.receive", data_type: self.bytes.decode()} -+ else: -+ msg = {"type": "websocket.receive", data_type: self.bytes} -+ self.queue.put_nowait(msg) -+ if not self.read_paused: -+ self.read_paused = True -+ self.transport.pause_reading() -+ -+ def handle_ping(self, event: Frame) -> None: -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ def handle_close(self, event: Frame) -> None: -+ if not self.close_sent and not self.transport.is_closing(): -+ disconnect_event: WebSocketDisconnectEvent = { -+ "type": "websocket.disconnect", -+ "code": self.conn.close_rcvd.code, # type: ignore[union-attr] -+ "reason": self.conn.close_rcvd.reason, # type: ignore[union-attr] -+ } -+ self.queue.put_nowait(disconnect_event) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ -+ def handle_parser_exception(self) -> None: -+ disconnect_event: WebSocketDisconnectEvent = { -+ "type": "websocket.disconnect", -+ "code": self.conn.close_sent.code, # type: ignore[union-attr] -+ "reason": self.conn.close_sent.reason, # type: ignore[union-attr] -+ } -+ self.queue.put_nowait(disconnect_event) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.close_sent = True -+ self.transport.close() -+ -+ def on_task_complete(self, task: asyncio.Task[None]) -> None: -+ self.tasks.discard(task) -+ -+ async def run_asgi(self) -> None: -+ try: -+ result = await self.app(self.scope, self.receive, self.send) -+ except ClientDisconnected: -+ self.transport.close() -+ except BaseException: -+ self.logger.exception("Exception in ASGI application\n") -+ self.send_500_response() -+ self.transport.close() -+ else: -+ if not self.handshake_complete: -+ msg = "ASGI callable returned without completing handshake." -+ self.logger.error(msg) -+ self.send_500_response() -+ self.transport.close() -+ elif result is not None: -+ msg = "ASGI callable should return None, but returned '%s'." 
-+ self.logger.error(msg, result) -+ self.transport.close() -+ -+ def send_500_response(self) -> None: -+ if self.initial_response or self.handshake_complete: -+ return -+ response = self.conn.reject(500, "Internal Server Error") -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ async def send(self, message: ASGISendEvent) -> None: -+ await self.writable.wait() -+ -+ message_type = message["type"] -+ -+ if not self.handshake_complete and self.initial_response is None: -+ if message_type == "websocket.accept": -+ message = cast(WebSocketAcceptEvent, message) -+ self.logger.info( -+ '%s - "WebSocket %s" [accepted]', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ ) -+ headers = [ -+ (name.decode("latin-1").lower(), value.decode("latin-1").lower()) -+ for name, value in (self.default_headers + list(message.get("headers", []))) -+ ] -+ accepted_subprotocol = message.get("subprotocol") -+ if accepted_subprotocol: -+ headers.append(("Sec-WebSocket-Protocol", accepted_subprotocol)) -+ self.response.headers.update(headers) -+ -+ if not self.transport.is_closing(): -+ self.handshake_complete = True -+ self.conn.send_response(self.response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ elif message_type == "websocket.close": -+ message = cast(WebSocketCloseEvent, message) -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) -+ self.logger.info( -+ '%s - "WebSocket %s" 403', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ ) -+ response = self.conn.reject(HTTPStatus.FORBIDDEN, "") -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.close_sent = True -+ self.handshake_complete = True -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ elif message_type == "websocket.http.response.start" and self.initial_response is None: -+ message = cast(WebSocketResponseStartEvent, message) -+ if not (100 <= message["status"] < 600): -+ raise RuntimeError("Invalid HTTP status code '%d' in response." % message["status"]) -+ self.logger.info( -+ '%s - "WebSocket %s" %d', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ message["status"], -+ ) -+ headers = [ -+ (name.decode("latin-1"), value.decode("latin-1")) -+ for name, value in list(message.get("headers", [])) -+ ] -+ self.initial_response = (message["status"], headers, b"") -+ else: -+ msg = ( -+ "Expected ASGI message 'websocket.accept', 'websocket.close' " -+ "or 'websocket.http.response.start' " -+ "but got '%s'." 
-+ ) -+ raise RuntimeError(msg % message_type) -+ -+ elif not self.close_sent and self.initial_response is None: -+ try: -+ if message_type == "websocket.send": -+ message = cast(WebSocketSendEvent, message) -+ bytes_data = message.get("bytes") -+ text_data = message.get("text") -+ if text_data: -+ self.conn.send_text(text_data.encode()) -+ elif bytes_data: -+ self.conn.send_binary(bytes_data) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ elif message_type == "websocket.close" and not self.transport.is_closing(): -+ message = cast(WebSocketCloseEvent, message) -+ code = message.get("code", 1000) -+ reason = message.get("reason", "") or "" -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) -+ self.conn.send_close(code, reason) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.close_sent = True -+ self.transport.close() -+ else: -+ msg = "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." -+ raise RuntimeError(msg % message_type) -+ except InvalidState: -+ raise ClientDisconnected() -+ elif self.initial_response is not None: -+ if message_type == "websocket.http.response.body": -+ message = cast(WebSocketResponseBodyEvent, message) -+ body = self.initial_response[2] + message["body"] -+ self.initial_response = self.initial_response[:2] + (body,) -+ if not message.get("more_body", False): -+ response = self.conn.reject(self.initial_response[0], body.decode()) -+ response.headers.update(self.initial_response[1]) -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.close_sent = True -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ else: -+ msg = "Expected ASGI message 'websocket.http.response.body' " "but got '%s'." -+ raise RuntimeError(msg % message_type) -+ -+ else: -+ msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." -+ raise RuntimeError(msg % message_type) -+ -+ async def receive(self) -> ASGIReceiveEvent: -+ message = await self.queue.get() -+ if self.read_paused and self.queue.empty(): -+ self.read_paused = False -+ self.transport.resume_reading() -+ return message -diff --git a/uvicorn/server.py b/uvicorn/server.py -index cca2e85..50c5ed2 100644 ---- a/uvicorn/server.py -+++ b/uvicorn/server.py -@@ -23,9 +23,10 @@ if TYPE_CHECKING: - from uvicorn.protocols.http.h11_impl import H11Protocol - from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol - from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol -+ from uvicorn.protocols.websockets.websockets_sansio_impl import WebSocketsSansIOProtocol - from uvicorn.protocols.websockets.wsproto_impl import WSProtocol - -- Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol] -+ Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol, WebSocketsSansIOProtocol] - - HANDLED_SIGNALS = ( - signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. 
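[Editor's note on the patch above: per the WSProtocolType and WS_PROTOCOLS additions it contains, the new sans-io implementation becomes selectable at runtime through uvicorn's --ws option. A minimal usage sketch, assuming this patched uvicorn is installed and an ASGI app is importable as app:app (both assumptions, not part of the patch):

    # select the sans-io websockets protocol implementation
    uvicorn app:app --ws websockets-sansio

    # equivalently, via the Python API
    python3 -c 'import uvicorn; uvicorn.run("app:app", ws="websockets-sansio")'

With --ws auto (the default), protocol selection is still resolved through the WS_PROTOCOLS table shown in the uvicorn/config.py hunk above.]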
diff --git a/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch b/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch deleted file mode 100644 index c1179f3..0000000 --- a/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch +++ /dev/null @@ -1,567 +0,0 @@ -diff --git a/requirements.txt b/requirements.txt -index e26e6b3..b16569f 100644 ---- a/requirements.txt -+++ b/requirements.txt -@@ -7,7 +7,7 @@ h11 @ git+https://github.com/python-hyper/h11.git@master - # Explicit optionals - a2wsgi==1.10.7 - wsproto==1.2.0 --websockets==13.1 -+websockets==14.1 - - # Packaging - build==1.2.2.post1 -diff --git a/tests/middleware/test_logging.py b/tests/middleware/test_logging.py -index 63d7daf..5aef174 100644 ---- a/tests/middleware/test_logging.py -+++ b/tests/middleware/test_logging.py -@@ -8,8 +8,7 @@ import typing - - import httpx - import pytest --import websockets --import websockets.client -+from websockets.asyncio.client import connect - - from tests.utils import run_server - from uvicorn import Config -@@ -107,8 +106,8 @@ async def test_trace_logging_on_ws_protocol( - break - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config( - app=websocket_app, -diff --git a/tests/middleware/test_proxy_headers.py b/tests/middleware/test_proxy_headers.py -index d300c45..4b5f195 100644 ---- a/tests/middleware/test_proxy_headers.py -+++ b/tests/middleware/test_proxy_headers.py -@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING - import httpx - import httpx._transports.asgi - import pytest --import websockets.client -+from websockets.asyncio.client import connect - - from tests.response import Response - from tests.utils import run_server -@@ -479,7 +479,7 @@ async def test_proxy_headers_websocket_x_forwarded_proto( - async with run_server(config): - url = f"ws://127.0.0.1:{unused_tcp_port}" - headers = {X_FORWARDED_FOR: "1.2.3.4", X_FORWARDED_PROTO: forwarded_proto} -- async with websockets.client.connect(url, extra_headers=headers) as websocket: -+ async with connect(url, additional_headers=headers) as websocket: - data = await websocket.recv() - assert data == expected - -diff --git a/tests/protocols/test_websocket.py b/tests/protocols/test_websocket.py -index e728544..b9035ec 100644 ---- a/tests/protocols/test_websocket.py -+++ b/tests/protocols/test_websocket.py -@@ -12,6 +12,8 @@ import websockets.asyncio.client - import websockets.client - import websockets.exceptions - from typing_extensions import TypedDict -+from websockets.asyncio.client import ClientConnection, connect -+from websockets.exceptions import ConnectionClosed, ConnectionClosedError, InvalidHandshake, InvalidStatus - from websockets.extensions.permessage_deflate import ClientPerMessageDeflateFactory - from websockets.typing import Subprotocol - -@@ -130,8 +132,8 @@ async def test_accept_connection(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -146,7 +148,7 @@ async def test_shutdown(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProt - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", 
port=unused_tcp_port) - async with run_server(config) as server: -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}"): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}"): - # Attempt shutdown while connection is still open - await server.shutdown() - -@@ -160,8 +162,8 @@ async def test_supports_permessage_deflate_extension( - - async def open_connection(url: str): - extension_factories = [ClientPerMessageDeflateFactory()] -- async with websockets.client.connect(url, extensions=extension_factories) as websocket: -- return [extension.name for extension in websocket.extensions] -+ async with connect(url, extensions=extension_factories) as websocket: -+ return [extension.name for extension in websocket.protocol.extensions] - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -180,8 +182,8 @@ async def test_can_disable_permessage_deflate_extension( - # enable per-message deflate on the client, so that we can check the server - # won't support it when it's disabled. - extension_factories = [ClientPerMessageDeflateFactory()] -- async with websockets.client.connect(url, extensions=extension_factories) as websocket: -- return [extension.name for extension in websocket.extensions] -+ async with connect(url, extensions=extension_factories) as websocket: -+ return [extension.name for extension in websocket.protocol.extensions] - - config = Config( - app=App, -@@ -203,8 +205,8 @@ async def test_close_connection(ws_protocol_cls: WSProtocol, http_protocol_cls: - - async def open_connection(url: str): - try: -- await websockets.client.connect(url) -- except websockets.exceptions.InvalidHandshake: -+ await connect(url) -+ except InvalidHandshake: - return False - return True # pragma: no cover - -@@ -224,8 +226,8 @@ async def test_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProto - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url, extra_headers=[("username", "abraão")]) as websocket: -- return websocket.open -+ async with connect(url, additional_headers=[("username", "abraão")]): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -239,8 +241,9 @@ async def test_extra_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: HTT - await self.send({"type": "websocket.accept", "headers": [(b"extra", b"header")]}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -258,8 +261,8 @@ async def test_path_and_raw_path(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -276,7 +279,7 @@ async def test_send_text_data_to_client( - await self.send({"type": "websocket.send", "text": "123"}) - - async def 
get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - return await websocket.recv() - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -294,7 +297,7 @@ async def test_send_binary_data_to_client( - await self.send({"type": "websocket.send", "bytes": b"123"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - return await websocket.recv() - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -313,7 +316,7 @@ async def test_send_and_close_connection( - await self.send({"type": "websocket.close"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - data = await websocket.recv() - is_open = True - try: -@@ -342,7 +345,7 @@ async def test_send_text_data_to_server( - await self.send({"type": "websocket.send", "text": _text}) - - async def send_text(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.send("abc") - return await websocket.recv() - -@@ -365,7 +368,7 @@ async def test_send_binary_data_to_server( - await self.send({"type": "websocket.send", "bytes": _bytes}) - - async def send_text(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.send(b"abc") - return await websocket.recv() - -@@ -387,7 +390,7 @@ async def test_send_after_protocol_close( - await self.send({"type": "websocket.send", "text": "123"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - data = await websocket.recv() - is_open = True - try: -@@ -407,14 +410,14 @@ async def test_missing_handshake(ws_protocol_cls: WSProtocol, http_protocol_cls: - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - pass - -- async def connect(url: str): -- await websockets.client.connect(url) -+ async def open_connection(url: str): -+ await connect(url) - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- await connect(f"ws://127.0.0.1:{unused_tcp_port}") -- assert exc_info.value.status_code == 500 -+ with pytest.raises(InvalidStatus) as exc_info: -+ await open_connection(f"ws://127.0.0.1:{unused_tcp_port}") -+ assert exc_info.value.response.status_code == 500 - - - async def test_send_before_handshake( -@@ -423,14 +426,14 @@ async def test_send_before_handshake( - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - await send({"type": "websocket.send", "text": "123"}) - -- async def connect(url: str): -- await websockets.client.connect(url) -+ async def open_connection(url: str): -+ await connect(url) - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- await connect(f"ws://127.0.0.1:{unused_tcp_port}") -- assert exc_info.value.status_code == 500 -+ with pytest.raises(InvalidStatus) as exc_info: -+ await 
open_connection(f"ws://127.0.0.1:{unused_tcp_port}") -+ assert exc_info.value.response.status_code == 500 - - - async def test_duplicate_handshake(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -440,10 +443,10 @@ async def test_duplicate_handshake(ws_protocol_cls: WSProtocol, http_protocol_cl - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ with pytest.raises(ConnectionClosed): - _ = await websocket.recv() -- assert websocket.close_code == 1006 -+ assert websocket.protocol.close_code == 1006 - - - async def test_asgi_return_value(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -458,10 +461,10 @@ async def test_asgi_return_value(ws_protocol_cls: WSProtocol, http_protocol_cls: - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ with pytest.raises(ConnectionClosed): - _ = await websocket.recv() -- assert websocket.close_code == 1006 -+ assert websocket.protocol.close_code == 1006 - - - @pytest.mark.parametrize("code", [None, 1000, 1001]) -@@ -493,13 +496,13 @@ async def test_app_close( - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - await websocket.ping() - await websocket.send("abc") -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ with pytest.raises(ConnectionClosed): - await websocket.recv() -- assert websocket.close_code == (code or 1000) -- assert websocket.close_reason == (reason or "") -+ assert websocket.protocol.close_code == (code or 1000) -+ assert websocket.protocol.close_reason == (reason or "") - - - async def test_client_close(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -518,7 +521,7 @@ async def test_client_close(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTP - break - - async def websocket_session(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.ping() - await websocket.send("abc") - await websocket.close(code=1001, reason="custom reason") -@@ -555,7 +558,7 @@ async def test_client_connection_lost( - port=unused_tcp_port, - ) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - websocket.transport.close() - await asyncio.sleep(0.1) - got_disconnect_event_before_shutdown = got_disconnect_event -@@ -583,7 +586,7 @@ async def test_client_connection_lost_on_send( - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): - url = 
f"ws://127.0.0.1:{unused_tcp_port}" -- async with websockets.client.connect(url): -+ async with connect(url): - await asyncio.sleep(0.1) - disconnect.set() - -@@ -642,11 +645,11 @@ async def test_send_close_on_server_shutdown( - disconnect_message = message - break - -- websocket: websockets.client.WebSocketClientProtocol | None = None -+ websocket: ClientConnection | None = None - - async def websocket_session(uri: str): - nonlocal websocket -- async with websockets.client.connect(uri) as ws_connection: -+ async with connect(uri) as ws_connection: - websocket = ws_connection - await server_shutdown_event.wait() - -@@ -676,9 +679,7 @@ async def test_subprotocols( - await self.send({"type": "websocket.accept", "subprotocol": subprotocol}) - - async def get_subprotocol(url: str): -- async with websockets.client.connect( -- url, subprotocols=[Subprotocol("proto1"), Subprotocol("proto2")] -- ) as websocket: -+ async with connect(url, subprotocols=[Subprotocol("proto1"), Subprotocol("proto2")]) as websocket: - return websocket.subprotocol - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -688,7 +689,7 @@ async def test_subprotocols( - - - MAX_WS_BYTES = 1024 * 1024 * 16 --MAX_WS_BYTES_PLUS1 = MAX_WS_BYTES + 1 -+MAX_WS_BYTES_PLUS1 = MAX_WS_BYTES + 10 - - - @pytest.mark.parametrize( -@@ -731,15 +732,15 @@ async def test_send_binary_data_to_server_bigger_than_default_on_websockets( - port=unused_tcp_port, - ) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}", max_size=client_size_sent) as ws: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}", max_size=client_size_sent) as ws: - await ws.send(b"\x01" * client_size_sent) - if expected_result == 0: - data = await ws.recv() - assert data == b"\x01" * client_size_sent - else: -- with pytest.raises(websockets.exceptions.ConnectionClosedError): -+ with pytest.raises(ConnectionClosedError): - await ws.recv() -- assert ws.close_code == expected_result -+ assert ws.protocol.close_code == expected_result - - - async def test_server_reject_connection( -@@ -764,10 +765,10 @@ async def test_server_reject_connection( - disconnected_message = await receive() - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 403 -+ assert exc_info.value.response.status_code == 403 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -937,10 +938,10 @@ async def test_server_reject_connection_with_invalid_msg( - await send(message) - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -971,10 +972,10 @@ async def test_server_reject_connection_with_missing_body( - # no further message - - async def websocket_session(url: str): -- with 
pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1014,17 +1015,17 @@ async def test_server_multiple_websocket_http_response_start_events( - exception_message = str(exc) - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): - await websocket_session(f"ws://127.0.0.1:{unused_tcp_port}") - - assert exception_message == ( -- "Expected ASGI message 'websocket.http.response.body' but got " "'websocket.http.response.start'." -+ "Expected ASGI message 'websocket.http.response.body' but got 'websocket.http.response.start'." - ) - - -@@ -1053,7 +1054,7 @@ async def test_server_can_read_messages_in_buffer_after_close( - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - await websocket.send(b"abc") - await websocket.send(b"abc") - await websocket.send(b"abc") -@@ -1070,8 +1071,9 @@ async def test_default_server_headers( - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1085,8 +1087,9 @@ async def test_no_server_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config( - app=App, -@@ -1108,8 +1111,9 @@ async def test_no_date_header_on_wsproto(http_protocol_cls: HTTPProtocol, unused - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config( - app=App, -@@ -1140,8 +1144,9 @@ async def test_multiple_server_header( - ) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - 
config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1176,8 +1181,8 @@ async def test_lifespan_state(ws_protocol_cls: WSProtocol, http_protocol_cls: HT - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - async def app_wrapper(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - if scope["type"] == "lifespan": -diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py -index cd6c54f..685d6b6 100644 ---- a/uvicorn/protocols/websockets/websockets_impl.py -+++ b/uvicorn/protocols/websockets/websockets_impl.py -@@ -13,8 +13,7 @@ from websockets.datastructures import Headers - from websockets.exceptions import ConnectionClosed - from websockets.extensions.base import ServerExtensionFactory - from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory --from websockets.legacy.server import HTTPResponse --from websockets.server import WebSocketServerProtocol -+from websockets.legacy.server import HTTPResponse, WebSocketServerProtocol - from websockets.typing import Subprotocol - - from uvicorn._types import ( -diff --git a/uvicorn/protocols/websockets/wsproto_impl.py b/uvicorn/protocols/websockets/wsproto_impl.py -index 828afe5..5d84bff 100644 ---- a/uvicorn/protocols/websockets/wsproto_impl.py -+++ b/uvicorn/protocols/websockets/wsproto_impl.py -@@ -149,12 +149,13 @@ class WSProtocol(asyncio.Protocol): - self.writable.set() # pragma: full coverage - - def shutdown(self) -> None: -- if self.handshake_complete: -- self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -- output = self.conn.send(wsproto.events.CloseConnection(code=1012)) -- self.transport.write(output) -- else: -- self.send_500_response() -+ if not self.response_started: -+ if self.handshake_complete: -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -+ output = self.conn.send(wsproto.events.CloseConnection(code=1012)) -+ self.transport.write(output) -+ else: -+ self.send_500_response() - self.transport.close() - - def on_task_complete(self, task: asyncio.Task[None]) -> None: -@@ -221,13 +222,15 @@ class WSProtocol(asyncio.Protocol): - def send_500_response(self) -> None: - if self.response_started or self.handshake_complete: - return # we cannot send responses anymore -+ reject_data = b"Internal Server Error" - headers: list[tuple[bytes, bytes]] = [ - (b"content-type", b"text/plain; charset=utf-8"), -+ (b"content-length", str(len(reject_data)).encode()), - (b"connection", b"close"), - (b"content-length", b"21"), - ] - output = self.conn.send(wsproto.events.RejectConnection(status_code=500, headers=headers, has_body=True)) -- output += self.conn.send(wsproto.events.RejectData(data=b"Internal Server Error")) -+ output += self.conn.send(wsproto.events.RejectData(data=reject_data)) - self.transport.write(output) - - async def run_asgi(self) -> None: diff --git a/ilot/uvicorn/APKBUILD b/ilot/uvicorn/APKBUILD deleted file mode 100644 index 1f14918..0000000 --- a/ilot/uvicorn/APKBUILD +++ /dev/null @@ -1,59 +0,0 @@ -maintainer="Michał Polański " -pkgname=uvicorn -pkgver=0.34.0 -pkgrel=0 -pkgdesc="Lightning-fast ASGI server" -url="https://www.uvicorn.org/" -license="BSD-3-Clause" -# disable due to lack of support for websockets 14 -# 
https://gitlab.alpinelinux.org/alpine/aports/-/issues/16646 -arch="noarch" -depends="py3-click py3-h11" -makedepends="py3-gpep517 py3-hatchling" -checkdepends=" - py3-a2wsgi - py3-dotenv - py3-httptools - py3-httpx - py3-pytest - py3-pytest-mock - py3-trustme - py3-typing-extensions - py3-watchfiles - py3-websockets - py3-wsproto - py3-yaml - " -subpackages="$pkgname-pyc" -source="https://github.com/encode/uvicorn/archive/$pkgver/uvicorn-$pkgver.tar.gz - test_multiprocess.patch - 2540_add-websocketssansioprotocol.patch - 2541_bump-wesockets-on-requirements.patch - fix-test-wsgi.patch - " - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest \ - -k "not test_close_connection_with_multiple_requests" # a known issue -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/uvicorn-$pkgver-py3-none-any.whl -} - -sha512sums=" -260782e385a2934049da8c474750958826afe1bfe23b38fe2f6420f355af7a537563f8fe6ac3830814c7469203703d10f4f9f3d6e53e79113bfd2fd34f7a7c72 uvicorn-0.34.0.tar.gz -cfad91dd84f8974362f52d754d7a29f09d07927a46acaa0eb490b6115a5729d84d6df94fead10ccd4cce7f5ea376f1348b0f59daede661dd8373a3851c313c46 test_multiprocess.patch -858e9a7baaf1c12e076aecd81aaaf622b35a59dcaabea4ee1bfc4cda704c9fe271b1cc616a5910d845393717e4989cecb3b04be249cb5d0df1001ec5224c293f 2540_add-websocketssansioprotocol.patch -f8a8c190981b9070232ea985880685bc801947cc7f673d59abf73d3e68bc2e13515ad200232a1de2af0808bc85da48a341f57d47caf87bcc190bfdc3c45718e0 2541_bump-wesockets-on-requirements.patch -379963f9ccbda013e4a0bc3441eee70a581c91f60206aedc15df6a8737950824b7cb8d867774fc415763449bb3e0bba66601e8551101bfc1741098acd035f0cc fix-test-wsgi.patch -" diff --git a/ilot/uvicorn/fix-test-wsgi.patch b/ilot/uvicorn/fix-test-wsgi.patch deleted file mode 100644 index ed49e52..0000000 --- a/ilot/uvicorn/fix-test-wsgi.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/tests/middleware/test_wsgi.py.orig b/tests/middleware/test_wsgi.py -index 6003f27..2750487 100644 ---- a/tests/middleware/test_wsgi.py.orig -+++ b/tests/middleware/test_wsgi.py -@@ -73,7 +73,7 @@ async def test_wsgi_post(wsgi_middleware: Callable) -> None: - async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client: - response = await client.post("/", json={"example": 123}) - assert response.status_code == 200 -- assert response.text == '{"example":123}' -+ assert response.text == '{"example": 123}' - - - @pytest.mark.anyio diff --git a/ilot/uvicorn/test_multiprocess.patch b/ilot/uvicorn/test_multiprocess.patch deleted file mode 100644 index 231526e..0000000 --- a/ilot/uvicorn/test_multiprocess.patch +++ /dev/null @@ -1,14 +0,0 @@ -Wait a bit longer, otherwise the workers might -not have time to finish restarting. 
- ---- a/tests/supervisors/test_multiprocess.py -+++ b/tests/supervisors/test_multiprocess.py -@@ -132,7 +132,7 @@ def test_multiprocess_sighup() -> None: - time.sleep(1) - pids = [p.pid for p in supervisor.processes] - supervisor.signal_queue.append(signal.SIGHUP) -- time.sleep(1) -+ time.sleep(3) - assert pids != [p.pid for p in supervisor.processes] - supervisor.signal_queue.append(signal.SIGINT) - supervisor.join_all() diff --git a/ilot/wikijs/APKBUILD b/ilot/wikijs/APKBUILD index bdf9fc2..3340f14 100644 --- a/ilot/wikijs/APKBUILD +++ b/ilot/wikijs/APKBUILD @@ -1,7 +1,7 @@ # Maintainer: Antoine Martin (ayakael) # Contributor: Antoine Martin (ayakael) pkgname=wikijs -pkgver=2.5.307 +pkgver=2.5.305 pkgrel=0 pkgdesc="Wiki.js | A modern, lightweight and powerful wiki app built on Node.js" license="AGPL-3.0" @@ -49,14 +49,11 @@ package() { install -Dm644 "$builddir"/package.json -t "$pkgdir"/usr/lib/bundles/wikijs cp -aR "$builddir"/assets "$builddir"/server "$builddir"/node_modules "$pkgdir"/usr/lib/bundles/wikijs - # remove prebuilts - rm -Rf "$pkgdir"/usr/lib/bundles/wikijs/node_modules/*/prebuilds - mkdir -p "$pkgdir"/var/lib/wikijs chown 5494:5494 "$pkgdir"/var/lib/wikijs } sha512sums=" -8bf22ae87a9e3b8dd6f7114d0cf59913ad2cb05a2ed0e9bb7ac302b546d71f34a14de64cbe6e0f8b887d5df65e9d2b065ca18fe4493d3939895b8fa7076dd567 wikijs-2.5.307.tar.gz +e715e2d93fd176dc93676b3dd97d8dd745589552a7d67971fce0c1097f607fa44a3147534709a82b3ad13dda95d7c5833bc30ec37538c6cdef54ac309e6b44d1 wikijs-2.5.305.tar.gz 355131ee5617348b82681cb8543c784eea59689990a268ecd3b77d44fe9abcca9c86fb8b047f0a8faeba079c650faa7790c5dd65418d313cd7561f38bb590c03 wikijs.initd 07b536c20e370d2a926038165f0e953283259c213a80a8648419565f5359ab05f528ac310e81606914013da212270df6feddb22e514cbcb2464c8274c956e4af config.sample.yml.patch "
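[Editor's note on the wikijs APKBUILD hunk above: the sha512sums block is edited by hand here to match the downgraded 2.5.305 tarball. With Alpine's standard abuild tooling the checksums can be regenerated rather than hand-edited, which avoids copy/paste mistakes in the hash lines. A minimal sketch, assuming abuild is installed and the command is run from this aports checkout:

    cd ilot/wikijs
    abuild checksum   # re-downloads the sources and rewrites the sha512sums= block
    abuild -r         # optionally fetch, build, and run checks to verify the downgrade
]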